| column | type | notes |
|---|---|---|
| repo_name | string | lengths 5-100 |
| path | string | lengths 4-231 |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6-947k |
| score | float64 | 0-0.34 |
| prefix | string | lengths 0-8.16k |
| middle | string | lengths 3-512 |
| suffix | string | lengths 0-8.17k |
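Each record below is one Python source file drawn from the named repository; the `prefix`, `middle`, and `suffix` columns split that file into three consecutive spans, and the long spans appear to be capped at roughly 8.2k characters, so some records begin or end mid-line. The readable text of a record is simply the concatenation of the three spans, and each record is listed here as its metadata row followed by that concatenated text. A minimal reconstruction sketch, assuming a row is available as a plain dict keyed by the column names above; the helper name and the example values (taken from the first record) are illustrative only:

```python
# Illustrative reconstruction sketch; assumes each row of the table is a dict
# keyed by the column names above. Nothing here is part of the dataset itself.
def reconstruct_source(row):
    """Concatenate the prefix, middle and suffix spans back into one string."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Example values taken from the first record (KlausPopp/Moddy, version.py):
example_row = {
    "repo_name": "KlausPopp/Moddy",
    "path": "src/moddy/version.py",
    "prefix": 'version = (2, 0, 0)\nVERSION = "%d.%',
    "middle": 'd.%d" %',
    "suffix": " version\n",
}
print(reconstruct_source(example_row))
```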

KlausPopp/Moddy | src/moddy/version.py | Python | lgpl-3.0 | size 51 | score 0

version = (2, 0, 0)
VERSION = "%d.%d.%d" % version

faskiri/barry2gugl | dn.py | Python | apache-2.0 | size 3,915 | score 0.02069

#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
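
For orientation, a hypothetical usage sketch of the `DN` class above (not part of the original `dn.py` record; the attribute values are invented):

```python
# Hypothetical usage of the DN class defined above; all values are invented.
entry = DN('cn=Jane Doe,dn')
entry.cn = 'Jane Doe'            # each setter appends to a list, so repeated
entry.displayName = 'Jane Doe'   # assignments accumulate values
entry.givenName = 'Jane'
entry.sn = 'Doe'
entry.mail = 'jane@example.com'
entry.mobile = '+1 555 0100'
print(entry.csv())  # one comma-separated row; multi-valued fields joined with ' ::: '
print(entry)        # DN<dn=cn=Jane Doe, cn=['Jane Doe'], displayName=['Jane Doe'], ...>
```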

abhijitbangera/ecommerce | src/carts/migrations/0007_merge.py | Python | mit | size 293 | score 0

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('carts', '0006_auto_20150930_1739'),
('carts', '0005_auto_20151022_2158'),
]
operations = [
]

rtfd/sphinx-autoapi | autoapi/toctree.py | Python | mit | size 5,871 | score 0.001533

"""
A small Sphinx extension that adds Domain objects (eg. Python Classes & Methods) to the TOC Tree.
It dynamically adds them to the already rendered ``app.env.tocs`` dict on the Sphinx environment.
Traditionally this only contains Section's,
we then nest our Domain references inside the already existing Sections.
"""
from docutils import nodes
from sphinx import addnodes
import sphinx.util.logging
LOGGER = sphinx.util.logging.getLogger(__name__)
def _build_toc_node(docname, anchor="anchor", text="test text", bullet=False):
"""
Create the node structure that Sphinx expects for TOC Tree entries.
The ``bullet`` argument wraps it in a ``nodes.bullet_list``,
which is how you nest TOC Tree entries.
"""
reference = nodes.reference(
"",
"",
internal=True,
refuri=docname,
anchorname="#" + anchor,
*[nodes.Text(text, text)]
)
para = addnodes.compact_paragraph("", "", reference)
ret_list = nodes.list_item("", para)
return nodes.bullet_list("", ret_list) if bullet else ret_list
def _traverse_parent(node, objtypes):
"""
Traverse up the node's parents until you hit the ``objtypes`` referenced.
Can either be a single type,
or a tuple of types.
"""
curr_node = node.parent
while curr_node is not None:
if isinstance(curr_node, objtypes):
return curr_node
curr_node = curr_node.parent
return None
def _find_toc_node(toc, ref_id, objtype):
"""
Find the actual TOC node for a ref_id.
Depends on the object type:
* Section - First section (refuri) or 2nd+ level section (anchorname)
* Desc - Just use the anchor name
"""
for check_node in toc.traverse(nodes.reference):
if objtype == nodes.section and (
check_node.attributes["refuri"] == ref_id
or check_node.attributes["anchorname"] == "#" + ref_id
):
return check_node
if (
objtype == addnodes.desc
and check_node.attributes["anchorname"] == "#" + ref_id
):
return check_node
return None
def _get_toc_reference(node, toc, docname):
"""
Logic that understands maps a specific node to it's part of the toctree.
It takes a specific incoming ``node``,
and returns the actual TOC Tree node that is said reference.
"""
if isinstance(node, nodes.section) and isinstance(node.parent, nodes.document):
# Top Level Section header
ref_id = docname
toc_reference = _find_toc_node(toc, ref_id, nodes.section)
elif isinstance(node, nodes.section):
# Nested Section header
ref_id = node.attributes["ids"][0]
toc_reference = _find_toc_node(toc, ref_id, nodes.section)
else:
# Desc node
try:
ref_id = node.children[0].attributes["ids"][0]
toc_reference = _find_toc_node(toc, ref_id, addnodes.desc)
except (KeyError, IndexError):
LOGGER.warning("Invalid desc node", exc_info=True)
toc_reference = None
return toc_reference
def add_domain_to_toctree(app, doctree, docname):
"""
Add domain objects to the toctree dynamically.
This should be attached to the ``doctree-resolved`` event.
This works by:
* Finding each domain node (addnodes.desc)
* Figuring out it's parent that will be in the toctree
(nodes.section, or a previously added addnodes.desc)
* Finding that parent in the TOC Tree based on it's ID
* Taking that element in the TOC Tree,
and finding it's parent that is a TOC Listing (nodes.bullet_list)
* Adding the new TOC element for our specific node as a child of that nodes.bullet_list
* This checks that bullet_list's last child,
and checks that it is also a nodes.bullet_list,
effectively nesting it under that element
"""
toc = app.env.tocs[docname]
for desc_node in doctree.traverse(addnodes.desc):
try:
ref_id = desc_node.children[0].attributes["ids"][0]
except (KeyError, IndexError):
LOGGER.warning("Invalid desc node", exc_info=True)
continue
try:
# Python domain object
ref_text = desc_node[0].attributes["fullname"].split(".")[-1].split("(")[0]
except (KeyError, IndexError):
# TODO[eric]: Support other Domains and ways of accessing this data
# Use `astext` for other types of domain objects
ref_text = desc_node[0].astext().split(".")[-1].split("(")[0]
# This is the actual object that will exist in the TOC Tree
# Sections by default, and other Desc nodes that we've previously placed.
parent_node = _traverse_parent(
node=desc_node, objtypes=(addnodes.desc, nodes.section)
)
if parent_node:
toc_reference = _get_toc_reference(parent_node, toc, docname)
if toc_reference:
# Get the last child of our parent's bullet list, this is where "we" live.
toc_insertion_point = _traverse_parent(
toc_reference, nodes.bullet_list
)[-1]
# Ensure we're added another bullet list so that we nest inside the parent,
# not next to it
if toc_insertion_point and isinstance(
toc_insertion_point[0], nodes.bullet_list
):
new_insert = toc_insertion_point[0]
to_add = _build_toc_node(docname, anchor=ref_id, text=ref_text)
new_insert.append(to_add)
else:
to_add = _build_toc_node(
docname, anchor=ref_id, text=ref_text, bullet=True
)
toc_insertion_point.append(to_add)

TieWei/nova | nova/virt/libvirt/imagebackend.py | Python | apache-2.0 | size 23,315 | score 0.000815

# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
try:
import rbd
except ImportError:
rbd = None
__imagebackend_opts = [
cfg.StrOpt('libvirt_images_type',
default='default',
help='VM Images format. Acceptable values are: raw, qcow2, lvm,'
'rbd, default. If default is specified,'
' then use_cow_images flag is used instead of this one.'),
cfg.StrOpt('libvirt_images_volume_group',
help='LVM Volume Group that is used for VM images, when you'
' specify libvirt_images_type=lvm.'),
cfg.BoolOpt('libvirt_sparse_logical_volumes',
default=False,
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
cfg.IntOpt('libvirt_lvm_snapshot_size',
default=1000,
help='The amount of storage (in megabytes) to allocate for LVM'
' snapshot copy-on-write blocks.'),
cfg.StrOpt('libvirt_images_rbd_pool',
default='rbd',
help='the RADOS pool in which rbd volumes are stored'),
cfg.StrOpt('libvirt_images_rbd_ceph_conf',
default='', # default determined by librados
help='path to the ceph configuration file to use'),
]
CONF = cfg.CONF
CONF.register_opts(__imagebackend_opts)
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
CONF.import_opt('preallocate_images', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
class Image(object):
__metaclass__ = abc.ABCMeta
def __init__(self, source_type, driver_format, is_block_dev=False):
"""Image initialization.
:source_type: block or file
:driver_format: raw or qcow2
:is_block_dev:
"""
self.source_type = source_type
self.driver_format = driver_format
self.is_block_dev = is_block_dev
self.preallocate = False
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
# are trying to create a base file at the same time
self.lock_path = os.path.join(CONF.instances_path, 'locks')
@abc.abstractmethod
def create_image(self, prepare_template, base, size, *args, **kwargs):
"""Create image from template.
Contains specific behavior for each image type.
:prepare_template: function, that creates template.
Should accept `target` argument.
:base: Template name
:size: Size of created image in bytes
"""
pass
def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
extra_specs, hypervisor_version):
"""Get `LibvirtConfigGuestDisk` filled for this image.
:disk_dev: Disk bus device name
:disk_bus: Disk bus type
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
"""
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
info.source_device = device_type
info.target_bus = disk_bus
info.target_dev = disk_dev
info.driver_cache = cache_mode
info.driver_format = self.driver_format
driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version,
self.is_block_dev)
info.driver_name = driver_name
info.source_path = self.path
tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
'disk_write_bytes_sec', 'disk_write_iops_sec',
'disk_total_bytes_sec', 'disk_total_iops_sec']
# Note(yaguang): Currently, the only tuning available is Block I/O
# throttling for qemu.
if self.source_type in ['file', 'block']:
for key, value in extra_specs.iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in tune_items:
setattr(info, scope[1], value)
return info
def check_image_exists(self):
return os.path.exists(self.path)
def cache(self, fetch_func, filename, size=None, *args, **kwargs):
"""Creates image from template.
Ensures that template and image not already exists.
Ensures that base directory exists.
Synchronizes on template fetching.
:fetch_func: Function that creates the base image
Should accept `target` argument.
:filename: Name of the file in the image directory
:size: Size of created image in bytes (optional)
"""
@utils.synchronized(filename, external=True, lock_path=self.lock_path)
def call_if_not_exists(target, *args, **kwargs):
if not os.path.exists(target):
fetch_func(target=target, *args, **kwargs)
elif CONF.libvirt_images_type == "lvm" and \
'ephemeral_size' in kwargs:
fetch_func(target=target, *args, **kwargs)
base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
base = os.path.join(base_dir, filename)
if not self.check_image_exists() or not os.path.exists(base):
self.create_image(call_if_not_exists, base, size,
*args, **kwargs)
if (size and self.preallocate and self._can_fallocate() and
os.access(self.path, os.W_OK)):
utils.execute('fallocate', '-n', '-l', size, self.path)
def _can_fallocate(self):
"""Check once per class, whether fallocate(1) is available,
and that the instances directory supports fallocate(2).
"""
can_fallocate = getattr(self.__class__, 'can_fallocate', None)
if can_fallocate is None:
_out, err = utils.trycmd('fallocate', '-n', '-l', '1',
self.path + '.fallocate_test')
fileutils.delete_if_exists(self.path + '.fallocate_test')
can_fallocate = not err
self.__class__.can_fallocate = can_fallocate
if not can_fallocate:
LOG.error('Unable to preallocate_images=%s at path: %s' %
(CONF.preallocate_images, self.path))
return can_fallocate
@staticmethod
def verify_base_size(base, size, base_size=0):
"""Check that the base image is not larger than size.
Since images can't be generally shrunk, enforce this
constraint taking account of virtual image size.
"""
# Note(pbrady): The size and min_disk parameters of a glance
# image are checked against the instance size before the image
# is even downloaded from glance, but currently min_disk is
# adjustable and doesn't currently acc

anhstudios/swganh | data/scripts/templates/object/mobile/shared_huff_darklighter.py | Python | mit | size 452 | score 0.04646

#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_huff_darklighter.iff"
result.attribute_template_id = 9
result.stfName("theme_park_name","base_npc_theme_park")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result

tosmun/AdventOfCode | solutions/day1/p1/main.py | Python | apache-2.0 | size 233 | score 0.042918

floor = 0
with open('../input.txt', 'r') as fp:
while True:
buffer = fp.read(1024)
if buffer is None or len(buffer) <= 0:
break
for c in buffer:
if c == '(':
floor += 1
elif c == ')':
floor -= 1
print floor

dbaxa/GitPython | git/repo/base.py | Python | bsd-3-clause | size 30,180 | score 0.00666

# repo.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.exc import InvalidGitRepositoryError, NoSuchPathError
from git.cmd import Git
from git.util import Actor
from git.refs import *
from git.index import IndexFile
from git.objects import *
from git.config import GitConfigParser
from git.remote import (
Remote,
digest_process_messages,
finalize_process,
add_progress
)
from git.db import (
GitCmdObjectDB,
GitDB
)
from gitdb.util import (
join,
isfile,
hex_to_bin
)
from fun import (
rev_parse,
is_git_dir,
find_git_dir,
touch
)
import os
import sys
import re
DefaultDBType = GitDB
if sys.version_info[1] < 5: # python 2.4 compatiblity
DefaultDBType = GitCmdObjectDB
# END handle python 2.4
__all__ = ('Repo', )
class Repo(object):
"""Represents a git repository and allows you to query references,
gather commit information, generate diffs, create and clone repositories query
the log.
The following attributes are worth using:
'working_dir' is the working directory of the git command, wich is the working tree
directory if available or the .git directory in case of bare repositories
'working_tree_dir' is the working tree directory, but will raise AssertionError
if we are a bare repository.
'git_dir' is the .git repository directoy, which is always set."""
DAEMON_EXPORT_FILE = 'git-daemon-export-ok'
__slots__ = ( "working_dir", "_working_tree_dir", "git_dir", "_bare", "git", "odb" )
# precompiled regex
re_whitespace = re.compile(r'\s+')
re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
re_hexsha_shortened = re.compile('^[0-9A-Fa-f]{4,40}$')
re_author_committer_start = re.compile(r'^(author|committer)')
re_tab_full_line = re.compile(r'^\t(.*)$')
# invariants
# represents the configuration level of a configuration file
config_level = ("system", "global", "repository")
def __init__(self, path=None, odbt = DefaultDBType):
"""Create a new Repo instance
:param path: is the path to either the root git directory or the bare git repo::
repo = Repo("/Users/mtrier/Development/git-python")
repo = Repo("/Users/mtrier/Development/git-python.git")
repo = Repo("~/Development/git-python.git")
repo = Repo("$REPOSITORIES/Development/git-python.git")
:param odbt: Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects. It will
be used to access all object data
:raise InvalidGitRepositoryError:
:raise NoSuchPathError:
:return: git.Repo """
epath = os.path.abspath(os.path.expandvars(os.path.expanduser(path or os.getcwd())))
if not os.path.exists(epath):
raise NoSuchPathError(epath)
self.working_dir = None
self._working_tree_dir = None
self.git_dir = None
curpath = epath
# walk up the path to find the .git dir
while curpath:
if is_git_dir(curpath):
self.git_dir = curpath
self._working_tree_dir = os.path.dirname(curpath)
break
gitpath = find_git_dir(join(curpath, '.git'))
if gitpath is not None:
self.git_dir = gitpath
self._working_tree_dir = curpath
break
curpath, dummy = os.path.split(curpath)
if not dummy:
break
# END while curpath
if self.git_dir is None:
raise InvalidGitRepositoryError(epath)
self._bare = False
try:
self._bare = self.config_reader("repository").getboolean('core','bare')
except Exception:
# lets not assume the option exists, although it should
pass
# adjust the wd in case we are actually bare - we didn't know that
# in the first place
if self._bare:
self._working_tree_dir = None
# END working dir handling
self.working_dir = self._working_tree_dir or self.git_dir
self.git = Git(self.working_dir)
# special handling, in special times
args = [join(self.git_dir, 'objects')]
if issubclass(odbt, GitCmdObjectDB):
args.append(self.git)
self.odb = odbt(*args)
def __eq__(self, rhs):
if isinstance(rhs, Repo):
return self.git_dir == rhs.git_dir
return False
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
return hash(self.git_dir)
def __repr__(self):
return "%s(%r)" % (type(self).__name__
|
, self.git_dir)
# Description property
def _get_description(self):
filename = join(self.git_dir, 'description')
return file(filename).read().rstrip()
def _set_description(self, descr):
filename = join(self.git_dir, 'description')
file(filename, 'w').write(descr+'\n')
description = property(_get_description, _set_description,
doc="the project's description")
del _get_description
del _set_description
@property
def working_tree_dir(self):
""":return: The working tree directory of our git repository
:raise AssertionError: If we are a bare repository"""
if self._working_tree_dir is None:
raise AssertionError( "Repository at %r is bare and does not have a working tree directory" % self.git_dir )
return self._working_tree_dir
@property
def bare(self):
""":return: True if the repository is bare"""
return self._bare
@property
def heads(self):
"""A list of ``Head`` objects representing the branch heads in
this repo
:return: ``git.IterableList(Head, ...)``"""
return Head.list_items(self)
@property
def references(self):
"""A list of Reference objects representing tags, heads and remote references.
:return: IterableList(Reference, ...)"""
return Reference.list_items(self)
# alias for references
refs = references
# alias for heads
branches = heads
@property
def index(self):
""":return: IndexFile representing this repository's index."""
return IndexFile(self)
@property
def head(self):
""":return: HEAD Object pointing to the current head reference"""
return HEAD(self,'HEAD')
@property
def remotes(self):
"""A list of Remote objects allowing to access and manipulate remotes
:return: ``git.IterableList(Remote, ...)``"""
return Remote.list_items(self)
def remote(self, name='origin'):
""":return: Remote with the specified name
:raise ValueError: if no remote with such a name exists"""
return Remote(self, name)
#{ Submodules
@property
def submodules(self):
"""
:return: git.IterableList(Submodule, ...) of direct submodules
available from the current head"""
return Submodule.list_items(self)
def submodule(self, name):
""" :return: Submodule with the given name
:raise ValueError: If no such submodule exists"""
try:
return self.submodules[name]
except IndexError:
raise ValueError("Didn't find submodule named %r" % name)
# END exception handling
def create_submodule(self, *ar

AutorestCI/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/applications_health_evaluation.py | Python | mit | size 2,699 | score 0.002223

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .health_evaluation import HealthEvaluation
class ApplicationsHealthEvaluation(HealthEvaluation):
"""Represents health evaluation for applications, containing health
evaluations for each unhealthy application that impacted current aggregated
health state.
:param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str or :class:`enum
<azure.servicefabric.models.enum>`
:param description: Description of the health evaluation, which represents
a summary of the evaluation process.
:type description: str
:param kind: Polymorphic Discriminator
:type kind: str
:param max_percent_unhealthy_applications: Maximum allowed percentage of
unhealthy applications from the ClusterHealthPolicy.
:type max_percent_unhealthy_applications: int
:param total_count: Total number of applications from the health store.
:type total_count: long
:param unhealthy_evaluations:
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric.models.HealthEvaluationWrapper>`
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'kind': {'key': 'Kind', 'type': 'str'},
'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'},
'total_count': {'key': 'TotalCount', 'type': 'long'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
}
def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_applications=None, total_count=None, unhealthy_evaluations=None):
super(ApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description)
self.max_percent_unhealthy_applications = max_percent_unhealthy_applications
self.total_count = total_count
self.unhealthy_evaluations = unhealthy_evaluations
self.kind = 'Applications'

pawhewitt/Dev | SU2_PY/SU2/run/adaptation.py | Python | lgpl-2.1 | size 2,501 | score 0.015194

#!/usr/bin/env python
## \file adjoint.py
# \brief python package for running adjoint problems
# \author T. Lukaczyk, F. Palacios
# \version 5.0.0 "Raven"
#
# SU2 Original Developers: Dr. Francisco D. Palacios.
# Dr. Thomas D. Economon.
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
# Prof. Edwin van der Weide's group at the University of Twente.
# Prof. Vincent Terrapon's group at the University of Liege.
#
# Copyright (C) 2012-2017 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
import os, sys, shutil, copy
from .. import io as su2io
from .. import mesh as su2mesh
def adaptation ( config , kind='' ):
# local copy
konfig = copy.deepcopy(config)
# check kind
if kind: konfig['KIND_ADAPT'] = kind
kind = konfig.get('KIND_ADAPT','NONE')
if kind == 'NONE':
return {}
# check adapted?
# get adaptation function
adapt_function = su2mesh.adapt.name_map[kind]
# setup problem
suffix = 'adapt'
meshname_orig = konfig['MESH_FILENAME']
meshname_new = su2io.add_suffix( konfig['MESH_FILENAME'], suffix )
konfig['MESH_OUT_FILENAME'] = meshname_new
# Run Adaptation
info = adapt_function(konfig)
# update super config
config['MESH_FILENAME'] = meshname_new
config['KIND_ADAPT'] = kind
# files out
files = { 'MESH' : meshname_new }
# info out
append_nestdict( info, { 'FILES' : files } )
return info

Phelimb/atlas | scripts/newick2json.py | Python | mit | size 1,160 | score 0

#! /usr/bin/env python
import sys
from ete2 import Tree
import random
def get_json(node):
# Read ETE tag for duplication or speciation events
if not hasattr(node, 'evoltype'):
dup = random.sample(['N', 'Y'], 1)[0]
elif node.evoltype == "S":
dup = "N"
elif node.evoltype == "D":
dup = "Y"
node.name = node.name.replace("'", '')
json = {"name": node.name,
# "display_label": node.name,
# "duplication": dup,
# "branch_length": str(node.dist),
# "common_name": node.name,
# "seq_length": 0,
"type": "node" if node.children else "leaf",
# "uniprot_name": "Unknown",
}
if node.children:
json["children"] = []
for ch in node.children:
json["children"].append(get_json(ch))
return json
if __name__ == '__main__':
if len(sys.argv) > 1:
t = Tree(sys.argv[1])
else:
# create a random example tree
t = Tree()
t.populate(100, random_branches=True)
# TreeWidget seems to fail with simple quotes
print str(get_json(t)).replace("'", '"')

Hasimir/brython | www/src/Lib/encodings/iso8859_13.py | Python | bsd-3-clause | size 13,578 | score 0.020916

""" Python Character Mapping Codec iso8859_13 generated
|
from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-13',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u201d' # 0xA1 -> RIGHT DOUBLE QUOTATION MARK
'\xa2'

yasir1brahim/OLiMS | lims/idserver.py | Python | agpl-3.0 | size 8,055 | score 0.003228

from dependencies.dependency import ModuleSecurityInfo#, #allow_module
from dependencies.dependency import DateTime
from dependencies.dependency import DisplayList
from dependencies.dependency import getToolByName
from dependencies.dependency import TranslationServiceTool
from lims.browser import BrowserView
from lims import bikaMessageFactory as _
from lims.utils import t
from lims import interfaces
from lims import logger
from dependencies.dependency import IFileNameNormalizer
from dependencies.dependency import IIDNormalizer
from dependencies.dependency import getUtility
from dependencies.dependency import providedBy
import copy,re,urllib
from dependencies import transaction
class IDServerUnavailable(Exception):
pass
def idserver_generate_id(context, prefix, batch_size = None):
""" Generate a new id using external ID server.
"""
plone = context.portal_url.getPortalObject()
url = context.bika_setup.getIDServerURL()
try:
if batch_size:
# GET
f = urllib.urlopen('%s/%s/%s?%s' % (
url,
plone.getId(),
prefix,
urllib.urlencode({'batch_size': batch_size}))
)
else:
f = urllib.urlopen('%s/%s/%s'%(url, plone.getId(), prefix))
new_id = f.read()
f.close()
except:
from sys import exc_info
info = exc_info()
import zLOG; zLOG.LOG('INFO', 0, '', 'generate_id raised exception: %s, %s \n ID server URL: %s' % (info[0], info[1], url))
raise IDServerUnavailable(_('ID Server unavailable'))
return new_id
def generateUniqueId(context):
""" Generate pretty content IDs.
- context is used to find portal_type; in case there is no
prefix specified for the type, the normalized portal_type is
used as a prefix instead.
"""
fn_normalize = getUtility(IFileNameNormalizer).normalize
id_normalize = getUtility(IIDNormalizer).normalize
prefixes = context.bika_setup.getPrefixes()
year = context.bika_setup.getYearInPrefix() and \
DateTime().strftime("%Y")[2:] or ''
separator = '-'
for e in prefixes:
if 'separator' not in e:
e['separator'] = ''
if e['portal_type'] == context.portal_type:
separator = e['separator']
# Analysis Request IDs
if context.portal_type == "AnalysisRequest":
sample = context.getSample()
s_prefix = fn_normalize(sample.getSampleType().getPrefix())
sample_padding = context.bika_setup.getSampleIDPadding()
ar_padding = context.bika_setup.getARIDPadding()
sample_id = sample.getId()
sample_number = sample_id.split(s_prefix)[1]
ar_number = sample.getLastARNumber()
ar_number = ar_number and ar_number + 1 or 1
return fn_normalize(
("%s%s" + separator + "R%s") % (s_prefix,
str(sample_number).zfill(sample_padding),
str(ar_number).zfill(ar_padding))
)
# Sample Partition IDs
if context.portal_type == "SamplePartition":
# We do not use prefixes. There are actually codes that require the 'P'.
# matches = [p for p in prefixes if p['portal_type'] == 'SamplePartition']
# prefix = matches and matches[0]['prefix'] or 'samplepartition'
# padding = int(matches and matches[0]['padding'] or '0')
# at this time the part exists, so +1 would be 1 too many
partnr = str(len(context.aq_parent.objectValues('SamplePartition')))
# parent id is normalized already
return ("%s" + separator + "P%s") % (context.aq_parent.id, partnr)
if context.bika_setup.getExternalIDServer():
# if using external server
for d in prefixes:
# Sample ID comes from SampleType
if context.portal_type == "Sample":
prefix = context.getSampleType().getPrefix()
padding = context.bika_setup.getSampleIDPadding()
new_id = str(idserver_generate_id(context, "%s%s-" % (prefix, year)))
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
elif d['portal_type'] == context.portal_type:
prefix = d['prefix']
padding = d['padding']
new_id = str(idserver_generate_id(context, "%s%s-" % (prefix, year)))
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
# no prefix; use portal_type
# year is not inserted here
# portal_type is be normalized to lowercase
npt = id_normalize(context.portal_type)
new_id = str(idserver_generate_id(context, npt + "-"))
return ('%s' + separator + '%s') % (npt, new_id)
else:
# No external id-server.
def next_id(prefix):
# normalize before anything
prefix = fn_normalize(prefix)
plone = context.portal_url.getPortalObject()
# grab the first catalog we are indexed in.
at = getToolByName(plone, 'archetype_tool')
if context.portal_type in at.catalog_map:
catalog_name = at.catalog_map[context.portal_type][0]
else:
catalog_name = 'portal_catalog'
catalog = getToolByName(plone, catalog_name)
# get all IDS that start with prefix
# this must specifically exclude AR IDs (two -'s)
rr = re.compile("^"+prefix+separator+"[\d+]+$")
ids = [int(i.split(prefix+separator)[1]) \
for i in catalog.Indexes['id'].uniqueValues() \
if rr.match(i)]
#plone_tool = getToolByName(context, 'plone_utils')
#if not plone_tool.isIDAutoGenerated(l.id):
ids.sort()
_id = ids and ids[-1] or 0
new_id = _id + 1
return str(new_id)
for d in prefixes:
if context.portal_type == "Sample":
# Special case for Sample IDs
prefix = fn_normalize(context.getSampleType().getPrefix())
padding = context.bika_setup.getSampleIDPadding()
sequence_start = context.bika_setup.getSampleIDSequenceStart()
new_id = next_id(prefix+year)
# If sequence_start is greater than new_id. Set
# sequence_start as new_id. (Jira LIMS-280)
if sequence_start > int(new_id):
new_id = str(sequence_start)
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
elif d['portal_type'] == context.portal_type:
prefix = d['prefix']
padding = d['padding']
sequence_start = d.get("sequence_start", None)
new_id = next_id(prefix+year)
# Jira-tracker LIMS-280
if sequence_start and int(sequence_start) > int(new_id):
new_id = str(sequence_start)
if padding:
new_id = new_id.zfill(int(padding))
return ('%s%s' + separator + '%s') % (prefix, year, new_id)
# no prefix; use portal_type
# no year inserted here
# use "IID" normalizer, because we want portal_type to be lowercased.
prefix = id_normalize(context.portal_type);
new_id = next_id(prefix)
return ('%s' + separator + '%s') % (prefix, new_id)
def renameAfterCreation(obj):
# Can't rename without a subtransaction commit when using portal_factory
transaction.savepoint(optimistic=True)
# The id returned should be normalized already
new_id = generateUniqueId(obj)
obj.aq_inner.aq_parent.manage_renameObject(obj.id, new_id)
return new_id

1001genomes/AraGWAS | aragwas_server/gwasdb/migrations/0007_study_n_hits_thr.py | Python | mit | size 462 | score 0

# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-06-23 09:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gwasdb', '0006_auto_20170623_0933'),
]
operations = [
migrations.AddField(
model_name='study',
name='n_hits_thr',
field=models.IntegerField(blank=True, null=True),
),
]

trilan/lemon-filebrowser | setup.py | Python | bsd-3-clause | size 998 | score 0

import codecs
import os
from setuptools import setup, find_packages
def read(filename):
filepath = os.path.join(os.path.dirname(__file__), filename)
return codecs.open(filepath, encoding='utf-8').read()
setup(
name='lemon-filebrowser',
version='0.1.2',
license='ISC',
description="Fork of Patrick Kranzlmueller's django-filebrowser app.",
url='https://github.com/trilan/lemon-filebrowser',
author='Trilan Team',
author_email='dev@lemon.io',
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
],
)

pyrrho314/recipesystem | trunk/astrodata/samples/astrodata_Sample/RECIPES_Sample/primitives/primitives_OBSERVED.py | Python | mpl-2.0 | size 968 | score 0.007231

from astrodata.ReductionObjects import PrimitiveSet
class OBSERVEDPrimitives(PrimitiveSet):
astrotype = "OBSERVED"
def init(self, rc):
print "OBSERVEDPrimitives.init(rc)"
return
def typeSpecificPrimitive(self, rc):
print "OBSERVEDPrimitives::typeSpecificPrimitive()"
def mark(self, rc):
for ad in rc.get_inputs_as_astrodata():
if ad.is_type("MARK
|
ED"):
print "OBSERVEDPrimitives::mark(%s) already marked" % ad.filename
else:
ad.phu_set_key_value("S_MARKED", "TRUE")
rc.report_output(ad)
yield rc
def unmark(self, rc):
for ad in rc.get_inputs_as_astrodata():
if ad.is_type("UNMARKED"):
print "OBSERVEDPrimitives::unmark(%s) not marked" % ad.filename
else:
ad.phu_set_key_value("S_MARKED", None)
rc.report_output(ad)
yield rc

dimven/SpringNodes | py/Element.IsCut.py | Python | mit | size 932 | score 0.032189

import clr
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def tolist(obj1):
if hasattr(obj1,"__iter__"): return obj1
else: return [obj1]
elements = UnwrapElement(tolist(IN[0]))
out1 = []
cutters = []
cutU = InstanceVoidCutUtils
for i in xrange(len(elements)):
try:
if cutU.CanBeCutWithVoid(elements[i]):
cut1 = cutU.GetCuttingVoidInstances(elements[i])
if cut1.Count == 0:
out1.append(False)
cutters.append([])
else:
out1.append(True)
cut1 = [doc.GetElement(id).ToDSType(True) for id in cut1]
cutters.append(cut1)
else:
out1.append(False)
cutters.append([])
except:
out1.append(False)
cutters.append([])
OUT = out1, cutters

openstack/renderspec | renderspec/distloader.py | Python | apache-2.0 | size 2,522 | score 0

#!/usr/bin/python
# Copyright (c) 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
from jinja2.loaders import TemplateNotFound
from jinja2.utils import open_if_exists
import os
def get_dist_templates_path():
return os.path.join(os.path.dirname(__file__), 'dist-templates')
class RenderspecLoader(jinja2.BaseLoader):
"""A special template loader which allows rendering supplied .spec template
with distro specific blocks maintained as part of renderspec.
'.spec' returns the spec template (which you need to supply during init)
while other strings map to corresponding child templates included
in renderspec which simply extend the '.spec' template.
"""
base_ref = '.spec'
template_postfix = '.spec.j2'
def __init__(self, template_fn, encoding='utf-8'):
self.base_fn = template_fn
self.encoding = encoding
self.disttemp_path = get_dist_templates_path()
def get_source(self, environment, template):
if template == self.base_ref:
fn = self.base_fn
else:
fn = os.path.join(self.disttemp_path,
template + self.template_postfix)
f = open_if_exists(fn)
if not f:
return TemplateNotFound(template)
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = os.path.getmtime(self.base_fn)
def uptodate():
try:
return os.path.getmtime(self.base_fn) == mtime
except OSError:
return False
return contents, fn, uptodate
def list_templates(self):
found = set([self.base_ref])
walk_dir = os.walk(self.disttemp_path)
for _, _, filenames in walk_dir:
for fn in filenames:
if fn.endswith(self.template_postfix):
template = fn[:-len(self.template_postfix)]
found.add(template)
return sorted(found)

croxis/SpaceDrive | spacedrive/renderpipeline/rplibs/yaml/yaml_py2/parser.py | Python | mit | size 26,131 | score 0.002334

# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser(object):
# Since writing a recursive-descendant parser is a straightforward task, we
# do not give many comments here.
DEFAULT_TAGS = {
u'!': u'!',
u'!!': u'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, Documen

google/har-sanitizer | harsanitizer/harsan_api.py | Python | apache-2.0 | size 7,146 | score 0.012175

"""Scans and sanitzes HAR files containing sensitive information."""
# Copyright 2017, Google Inc.
# Authors: Garrett Anderson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import json
import urllib2
from flask import Flask, url_for, request, Response, render_template_string
import decorators
from harsanitizer import Har, HarSanitizer
# Config local/remote file locations
CURRENT_DIR = os.path.abspath("./")
# Load/sanity check config.json
try:
with open("./config.json", "r") as config:
STATIC_FOLDER = json.load(config)["static_folder"]
except IOError:
raise IOError(
"'config.json' not found in '{}'. Please ensure that script is "
"being run from root './har-sanitizer/' directory.".format(CURRENT_DIR))
except KeyError:
raise KeyError("'STATIC_FOLDER' key not found in config.json")
WORDLIST_PATH = "{}/wordlist.json".format(STATIC_FOLDER)
MIMETYPES_PATH = "{}/mimetypesScrubList.json".format(STATIC_FOLDER)
# Local STATIC_FOLDER and template config
if STATIC_FOLDER[:4] != "http":
INDEX_PATH = "{}/templates/localhost/index.html".format(STATIC_FOLDER)
# Remote STATIC_FOLDER and template config
else:
INDEX_PATH = "{}/templates/remotehost/index.html".format(STATIC_FOLDER)
# Serialize utility
def json_serial(obj):
"""JSON serializer for datetime.datetime not serializable by default json code."""
if isinstance(obj, datetime.datetime):
serial = obj.isoformat()
return serial
raise TypeError("Object not of type datetime.datetime")
app = Flask(__name__)
@app.route("/")
def index():
if STATIC_FOLDER[:4] == "http":
index_html_str = urllib2.urlopen(INDEX_PATH).read()
else:
with open(INDEX_PATH, "r") as index_file:
index_html_str = index_file.read()
return render_template_string(index_html_str, static_files=STATIC_FOLDER)
@app.route("/get_wordlist", methods=["GET"])
def get_wordlist():
"""Returns default HarSanitizer wordlist."""
hs = HarSanitizer()
try:
if WORDLIST_PATH[:4] == "http":
wordlist_json = json.loads(urllib2.urlopen(WORDLIST_PATH).read())
wordlist = hs.load_wordlist(wordlist=wordlist_json)
else:
wordlist = hs.load_wordlist(wordlist_path=WORDLIST_PATH)
except Exception:
message = {"message": "Error: {} not found.".format(WORDLIST_PATH)}
data = json.dumps(message, default=json_serial)
return Response(data, 500, mimetype="application/json")
data = json.dumps(wordlist, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/default_mimetype_scrublist", methods=["GET"])
def get_mimetype_scrublist():
"""Returns default HarSanitizer mimeTypes scrub list."""
hs = HarSanitizer()
try:
if MIMETYPES_PATH[:4] == "http":
mimetype_scrub_list = json.loads(urllib2.urlopen(MIMETYPES_PATH).read())
else:
with open(MIMETYPES_PATH, "r") as mimetypes_file:
mimetype_scrub_list = json.load(mimetypes_file)
except Exception:
message = {"message": "Error: {} not found.".format(MIMETYPES_PATH)}
data = json.dumps(message, default=json_serial)
return Response(data, 500, mimetype="application/json")
data = json.dumps(mimetype_scrub_list, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/cookies", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_cookie_names():
"""Returns all cookie names found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
har = Har(har=data)
cookies = hs.get_hartype_names(har, "cookies").keys()
data = json.dumps(cookies, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/headers", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_header_names():
"""Returns all header names found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
har = Har(har=data)
headers = hs.get_hartype_names(har, "headers").keys()
data = json.dumps(headers, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/params", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_urlparams():
"""Returns all URL Query and POSTData Parameter names found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
cond_table = {}
har = Har(har=data)
url_pattern = hs.gen_hartype_names_pattern(har, "queryString")
postdata_pattern = hs.gen_hartype_names_pattern(har, "params")
cond_table.update(url_pattern)
cond_table.update(postdata_pattern)
iter_har_dict = hs.iter_eval_exec(my_iter=har.har_dict, cond_table=cond_table)
har = hs.har
urlparams = har.category["queryString"].keys()
if isinstance(har.category["params"].keys(), list):
postdata_params = har.category["params"].keys()
params = urlparams + postdata_params
else:
params = urlparams
data = json.dumps(params, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/mimetypes", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def req_mimetypes():
"""Returns all content mimeTypes found in POSTed Har (json)."""
data = request.json
hs = HarSanitizer()
har = Har(har=data)
mimetypes = hs.get_mimetypes(har).keys()
  data = json.dumps(mimetypes, default=json_serial)
return Response(data, 200, mimetype="application/json")
@app.route("/scrub_har", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def scrub():
"""Scrubs data["har"] with optional wordlists,
|
content types, and scrub_all type bools.
"""
hs = HarSanitizer()
hs_kwargs = {}
data = request.json
har = Har(har=data["har"])
if "wordlist" in data.keys():
hs_kwargs["wordlist"] = data["wordlist"]
if "content_list" in data.keys():
hs_kwargs["content_list"] = data["content_list"]
if "all_cookies" in data.keys():
hs_kwargs["all_cookies"] = data["all_cookies"]
if "all_headers" in data.keys():
hs_kwargs["all_headers"] = data["all_headers"]
if "all_params" in data.keys():
hs_kwargs["all_params"] = data["all_params"]
if "all_content_mimetypes" in data.keys():
hs_kwargs["all_content_mimetypes"] = data["all_content_mimetypes"]
sanitized_har = hs.scrub(har, **hs_kwargs)
data = json.dumps(sanitized_har.har_dict, indent=2, separators=(",", ": "))
return Response(data, 200, mimetype="text/plain")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=False)
|
kelly-shen/canigraduate.uchicago.edu
|
backend/uchicago/src/timeschedules_parser_test.py
|
Python
|
mit
| 1,945
| 0
|
import unittest
import re
import requests
from .timeschedules_parser import FSM
class TestTimeschedulesParser(unittest.TestCase):
def _construct_fsm(self, string):
fsm = FSM([])
def stub(*args, **kwargs):
return string
fsm.next_string = stub
return fsm
def test_next_schedule(self):
self.assertEqual([], self._construct_fsm("ARRARR").next_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [7950, 8000]],
self._construct_fsm("MWF12:30PM-1:20PM").next_schedule())
self.assertEqual(
[[3510, 3590], [6390, 6470]],
self._construct_fsm("TTh10:30AM-11:50AM").next_schedule())
self.assertEqual(
[[3510, 3590]],
self._construct_fsm("Tue10:30AM-11:50AM").next_schedule())
self.assertEqual(
[[3510, 3590]],
self._construct_fsm("U10:30AM-11:50AM").next_schedule())
self.assertEqual(
[[1890, 2360], [3330, 3800], [4770, 5240], [6210, 6680],
[7650, 8120]],
self._construct_fsm("M-F7:30AM-3:20PM").next_schedule())
self.assertEqual(
[[9420, 9600]],
self._construct_fsm("Sat1:00PM-4:00PM").next_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [6510, 6560], [7950, 8000]],
self._construct_fsm("MWHF12:30PM-1:20PM").next_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [6510, 6560], [7950, 8000]],
self._construct_fsm("MWRF12:30PM-1:20PM").n
|
ext_schedule())
self.assertEqual(
[[2190, 2240], [5070, 5120], [6510, 6560], [7950, 8000]],
self._construct_fsm("MWTHF12:30PM-1:20PM").next_schedule())
self.assertEqual(
[[6660, 6830]],
self._construct_fsm("Thu3:00PM-5:50PM").next_schedule())
if __name__ == '__main__':
unittest.main()
|
pombredanne/invenio
|
modules/webaccess/lib/access_control_firerole.py
|
Python
|
gpl-2.0
| 15,550
| 0.007203
|
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Access Control FireRole."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
"""These functions are for realizing a firewall like role definition for extending
webaccess to connect user to roles using every infos about users.
"""
import re
import cPickle
from zlib import compress, decompress
import sys
import time
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.access_control_config import InvenioWebAccessFireroleError
from invenio.dbquery import run_sql, blob_to_string
from invenio.config import CFG_CERN_SITE
from invenio.access_control_config import CFG_ACC_EMPTY_ROLE_DEFINITION_SRC, \
CFG_ACC_EMPTY_ROLE_DEFINITION_SER, CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
# INTERFACE
def compile_role_definition(firerole_def_src):
""" Given a text in which every row contains a rule it returns the compiled
object definition.
Rules have the following syntax:
allow|deny [not] field {list of one or more (double)quoted string or regexp}
or allow|deny any
    Every row may contain a # sign followed by a comment, which is discarded.
Field could be any key contained in a user_info dictionary. If the key does
not exist in the dictionary, the rule is skipped.
    The first matching rule wins.
"""
line = 0
ret = []
default_allow_p = False
if not firerole_def_src or not firerole_def_src.strip():
firerole_def_src = CFG_ACC_EMPTY_ROLE_DEFINITION_SRC
for row in firerole_def_src.split('\n'):
line += 1
row = row.strip()
if not row:
continue
clean_row = _no_comment_re.sub('', row)
if clean_row:
g = _any_rule_re.match(clean_row)
if g:
default_allow_p = g.group('command').lower() == 'allow'
break
g = _rule_re.match(clean_row)
if g:
allow_p = g.group('command').lower() == 'allow'
not_p = g.group('not') != None
field = g.group('field').lower()
# Renaming groups to group
for alias_item in _aliasTable:
if field in alias_item:
field = alias_item[0]
break
if field.startswith('precached_'):
raise InvenioWebAccessFireroleError("Error while compiling rule %s (line %s): %s is a reserved key and can not be used in FireRole rules!" % (row, line, field))
expressions = g.group('expression')+g.group('more_expressions')
expressions_list = []
for expr in _expressions_re.finditer(expressions):
expr = expr.group()
if field in ('from', 'until'):
try:
expressions_list.append((False, time.mktime(time.strptime(expr[1:-1], '%Y-%m-%d'))))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid date with format YYYY-MM-DD because %s!" % (row, line, expr, msg))
elif expr[0] == '/':
try:
expressions_list.append((True, re.compile(expr[1:-1], re.I)))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid re because %s!" % (row, line, expr, msg))
else:
if field == 'remote_ip' and '/' in expr[1:-1]:
try:
expressions_list.append((False, _ip_matcher_builder(expr[1:-1])))
except Exception, msg:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): %s is not a valid ip group because %s!" % (row, line, expr, msg))
else:
expressions_list.append((False, expr[1:-1]))
expressions_list = tuple(expressions_list)
if field in ('from', 'until'):
if len(expressions_list) != 1:
raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): exactly one date is expected when using 'from' or 'until', but %s were found" % (row, line, len(expressions_list)))
if not_p:
raise InvenioWebAccessFireroleError("Error when compiling rule %s (line %s): 'not' is not allowed when using 'from' or 'until'" % (row, line))
ret.append((allow_p, not_p, field, expressions_list))
else:
raise InvenioWebAccessFireroleError("Syntax error while compiling rule %s (line %s): not a valid rule!" % (row, line))
return (default_allow_p, tuple(ret))
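# --- Editor's note: illustrative example, not part of the original module. ---
# A small sketch of what a FireRole definition source could look like for the
# compiler above; the group name, IP range and date below are hypothetical.
EXAMPLE_FIREROLE_DEF_SRC = """
deny remote_ip "10.0.0.0/8"   # reject a private address range
allow group "curators"        # members of the curators group
allow from "2015-01-01"       # and only after this date
deny any                      # everybody else is denied
"""
# compile_role_definition(EXAMPLE_FIREROLE_DEF_SRC) would return a
# (default_allow_p, rules) tuple ready to be serialized and stored.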
def repair_role_definitions():
""" Try to rebuild compiled serialized definitions from their respectives
sources. This is needed in case Python break back compatibility.
"""
definitions = run_sql("SELECT id, firerole_def_src FROM accROLE")
for role_id, firerole_def_src in definitions:
run_sql("UPDATE accROLE SET firerole_def_ser=%s WHERE id=%s", (serialize(compile_role_definition(firerole_def_src)), role_id))
def store_role_definition(role_id, firerole_def_ser, firerole_def_src):
""" Store a compiled serialized definition and its source in the database
    alongside the role to which it belongs.
@param role_id: the role_id
@param firerole_def_ser: the serialized compiled definition
@param firerole_def_src: the sources from which the definition was taken
"""
run_sql("UPDATE accROLE SET firerole_def_ser=%s, firerole_def_src=%s WHERE id=%s", (firerole_def_ser, firerole_def_src, role_id))
def load_role_definition(role_id):
""" Load the definition corresponding to a role. If the compiled definition
    is corrupted, it tries to repair definitions from their sources and tries again
to return the definition.
@param role_id:
@return: a deserialized compiled role definition
"""
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
try:
return deserialize(res[0][0])
except Exception:
## Something bad might have happened? (Update of Python?)
repair_role_definitions()
res = run_sql("SELECT firerole_def_ser FROM accROLE WHERE id=%s", (role_id, ), 1)
if res:
return deserialize(res[0][0])
return CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ
def acc_firerole_extract_emails(firerole_def_obj):
"""
Best effort function to
extract all the possible email addresses
authorized by the given firerole.
"""
authorized_emails = set()
try:
default_allow_p, rules = firerole_def_obj
for (allow_p, not_p, field, expressions_list) in rules: # for every rule
if not_p:
continue
if field == 'group':
for reg_p, expr in expressions_list:
if reg_p:
continue
if CFG_CERN_SITE and expr.endswith(' [CERN]'):
autho
|
seattleacademy/fall27
|
arrowup.py
|
Python
|
mit
| 336
| 0.029762
|
from sense_hat import SenseHat
sense = SenseHat()
X = [255, 0, 0] # Red
O = [255, 255, 255] # White
up_arrow = [
O, O, O, X, X, O, O, O,
O, O, X, X, X, X, O, O,
    O, X, X, X, X, X, X, O,
O, O, O, X, X, O, O, O,
O, O, O, X, X, O, O, O,
    O, O, O, X, X, O, O, O,
O, O, O, X, X, O, O, O,
O, O, O, X, X, O, O, O
]
sense.set_pixels(up_arrow)
|
kuasha/cosmos
|
cosmos/schema/object.py
|
Python
|
mit
| 154
| 0.006494
|
"""
Copyright (C) 2014 Maruf Maniruzzaman
Website: http://cosmosframework.com
Author: Maruf Maniruzzaman
License :: OSI Approved :: MIT License
"""
|
mmenz/michaelmenz
|
spare-parts/animatedgifs.py
|
Python
|
apache-2.0
| 420
| 0.033333
|
import gizeh
surface = gizeh.Surface(width=320, height=260)
circle = gizeh.circle (r=40, # radius, in pixels
xy= [156, 200], # coordinates of the center
fill= (1,0,0)) # 'red' in RGB coordinates
circle.draw( surface ) # draw the circle on the surface
surface.get_npimage() # export as a numpy array (we will use that)
surface.write_to_png("my_drawing.png") # export as a PNG
|
awolfly9/hammer
|
test/test.py
|
Python
|
mit
| 2,505
| 0.004391
|
# -*- coding=utf-8 -*-
import sys
import time
import logging
import os
sys.path.append(os.getcwd())
logging.basicConfig()
from hammer.sqlhelper import SqlHelper
db_config = {
'host': 'localhost',
'port': 3306,
'user': 'root',
'password': '123456',
'db': 'test',
}
def test_create_table():
command = '''
CREATE TABLE `test_test` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(10) DEFAULT NULL,
`age` int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=34 DEFAULT CHARSET=utf8;
'''
sql.execute(command, True)
def test_select():
command = ''''''
def test_insert():
datas = [
{
'name': "a'b'c",
'age': 1,
'date': None,
},
{
'name': 'a"b"c',
'age': 1,
'date': None,
},
{
'name': 'a"b";\'c',
'age': 1,
'date': None,
},
{
'name': "a\"blll\";\'c",
'age': 1,
'date': '2018',
},
]
sql.insert_datas(datas, table_name = 'test')
def test_update():
datas = [
{
'id': 1,
'name': "a'b'c",
'age': 2,
'date': None,
},
{
'id': 2,
'name': 'a"b"c',
'age': 2,
'date': None,
},
{
'id': 3,
'name': 'a"b";\'c',
'age': 2,
'date': None,
},
{
'id': 4,
'name': "a\"blll\";\'c",
'age': 2,
'date': '2018-01-02',
},
]
sql.update_datas(datas, table_name = 'test')
def test_is_exists():
print(sql.is_exists('testdfads'))
def test_check_table_exists():
print(sql.check_table_exists('test', db_name = 'tesdt'))
if __name__ == '__main__':
sql = SqlHelper(**db_config)
# test_insert()
# test_update()
# test_is_exists()
# test_check_table_exists()
datas = []
for i in range(1, 3):
data = {
'id': i,
'name': "vvv",
'age': None,
'date': None,
}
datas.append(data)
print(datas)
print(len(datas))
start = time.time()
# sql.insert_datas(datas, table_name = 'test')
sql.update_datas(datas, table_name = 'test', update_keys = ['name', 'age'])
print(time.time() - start)
|
twilio/twilio-python
|
twilio/rest/insights/v1/setting.py
|
Python
|
mit
| 7,805
| 0.000769
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SettingList(ListResource):
def __init__(self, version):
"""
Initialize the SettingList
:param Version version: Version that contains the resource
:returns: twilio.rest.insights.v1.setting.SettingList
:rtype: twilio.rest.insights.v1.setting.SettingList
"""
super(SettingList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self):
"""
Constructs a SettingContext
:returns: twilio.rest.insights.v1.setting.SettingContext
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
return SettingContext(self._version, )
def __call__(self):
"""
Constructs a SettingContext
:returns: twilio.rest.insights.v1.setting.SettingContext
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
return SettingContext(self._version, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Insights.V1.SettingList>'
class SettingPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the SettingPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.insights.v1.setting.SettingPage
:rtype: twilio.rest.insights.v1.setting.SettingPage
"""
super(SettingPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SettingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.insights.v1.setting.SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
return SettingInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Insights.V1.SettingPage>'
class SettingContext(InstanceContext):
def __init__(self, version):
"""
Initialize the SettingContext
:param Version version: Version that contains the resource
:returns: twilio.rest.insights.v1.setting.SettingContext
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
super(SettingContext, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Voice/Settings'.format(**self._solution)
    def fetch(self, subaccount_sid=values.unset):
"""
Fetch the SettingInstance
:param unicode subaccount_sid: The subaccount_sid
:returns: The fetched SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
        data = values.of({'SubaccountSid': subaccount_sid, })
payload = self._version.fetch(method='GET', uri=self._uri, params=data, )
return SettingInstance(self._version, payload, )
def update(self, advanced_features=values.unset, voice_trace=values.unset,
subaccount_sid=values.unset):
"""
Update the SettingInstance
:param bool advanced_features: The advanced_features
:param bool voice_trace: The voice_trace
:param unicode subaccount_sid: The subaccount_sid
:returns: The updated SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
data = values.of({
'AdvancedFeatures': advanced_features,
'VoiceTrace': voice_trace,
'SubaccountSid': subaccount_sid,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return SettingInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Insights.V1.SettingContext {}>'.format(context)
class SettingInstance(InstanceResource):
def __init__(self, version, payload):
"""
Initialize the SettingInstance
:returns: twilio.rest.insights.v1.setting.SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
super(SettingInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'advanced_features': payload.get('advanced_features'),
'voice_trace': payload.get('voice_trace'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SettingContext for this SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingContext
"""
if self._context is None:
self._context = SettingContext(self._version, )
return self._context
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def advanced_features(self):
"""
:returns: The advanced_features
:rtype: bool
"""
return self._properties['advanced_features']
@property
def voice_trace(self):
"""
:returns: The voice_trace
:rtype: bool
"""
return self._properties['voice_trace']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self, subaccount_sid=values.unset):
"""
Fetch the SettingInstance
:param unicode subaccount_sid: The subaccount_sid
:returns: The fetched SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
return self._proxy.fetch(subaccount_sid=subaccount_sid, )
def update(self, advanced_features=values.unset, voice_trace=values.unset,
subaccount_sid=values.unset):
"""
Update the SettingInstance
:param bool advanced_features: The advanced_features
:param bool voice_trace: The voice_trace
:param unicode subaccount_sid: The subaccount_sid
:returns: The updated SettingInstance
:rtype: twilio.rest.insights.v1.setting.SettingInstance
"""
return self._proxy.update(
advanced_features=advanced_features,
voice_trace=voice_trace,
subaccount_sid=subaccount_sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Insights.V1.SettingInstance {}>'.format(context)
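# --- Editor's note: illustrative usage sketch, not part of the generated file. ---
# Using only the classes defined above: given a `version` object obtained from an
# authenticated Twilio client (an assumption here), the singleton Voice Insights
# setting can be fetched and updated through SettingList / SettingContext.
def example_settings_roundtrip(version, subaccount_sid=values.unset):
    context = SettingList(version).get()  # SettingContext for /Voice/Settings
    current = context.fetch(subaccount_sid=subaccount_sid)
    # Toggle advanced_features while keeping the current voice_trace value.
    return context.update(
        advanced_features=not current.advanced_features,
        voice_trace=current.voice_trace,
        subaccount_sid=subaccount_sid,
    )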
|
USGSDenverPychron/pychron
|
docs/user_guide/operation/scripts/examples/basic.py
|
Python
|
apache-2.0
| 2,487
| 0.002413
|
#!Measurement
# all of this is configuration info that can be used in the script.
# you refer to these values using mx.<group>.<attribute>
# e.g
# mx.baseline.counts is 180
# mx.multicollect.detector is H1
'''
baseline:
after: true
before: false
counts: 180
detector: H1
mass: 34.2
settling_time: 15
default_fits: nominal
equilibration:
eqtime: 1.0
inlet: R
inlet_delay: 3
outlet: O
  use_extraction_eqtime: true
multicollect:
counts: 400
detector: H1
isotope: Ar40
peakcenter:
after: true
before: false
detector: H1
detectors:
- H1
- AX
- CDD
  isotope: Ar40
peakhop:
hops_name: ''
use_peak_hop: false
'''
# entry point for the script
def main():
# print a message to the user
info('unknown measurement script')
# activate the following detectors. measurements will be plotted and save for these detectors
activate_detectors('H2', 'H1', 'AX', 'L1', 'L2', 'CDD')
# position the magnet with Ar40 on H1
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
# choose where to get the equilibration duration from
# sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
# start the equilibration thread
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
# set time zero after equilibrate returns i.e after the ion pump valve closes
set_time_zero()
# record/plot the equilibration
sniff(eqt)
# set the default fits
set_fits()
set_baseline_fits()
# multicollect on active detectors for 400
multicollect(ncounts=mx.multicollect.counts)
if mx.baseline.after:
# do a baseline measurement
baselines(ncounts=mx.baseline.counts, mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
if mx.peakcenter.after:
# do a peak center scan and update the mftable with new peak centers
activate_detectors(*mx.peakcenter.detectors, **{'peak_center': True})
peak_center(detector=mx.peakcenter.detector, isotope=mx.peakcenter.isotope)
# print a message to the user
info('finished measure script')
|
tboyce021/home-assistant
|
homeassistant/components/volvooncall/binary_sensor.py
|
Python
|
apache-2.0
| 856
| 0.002336
|
"""Support for VOC."""
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from . import DATA_KEY, VolvoEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Volvo sensors."""
if discovery_info is None:
return
    async_add_entities([VolvoSensor(hass.data[DATA_KEY], *discovery_info)])
class VolvoSensor(VolvoEntity, BinarySensorEntity):
"""Representation of a Volvo sensor."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self.instrument.is_on
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
if self.instrument.device_class in DEVICE_CLASSES:
return self.instrument.device_class
        return None
|
Crystal-SDS/filter-middleware
|
crystal_filter_middleware/handlers/proxy.py
|
Python
|
gpl-3.0
| 13,744
| 0.001237
|
from crystal_filter_middleware.handlers import CrystalBaseHandler
from swift.common.swob import HTTPMethodNotAllowed
from swift.common.wsgi import make_subrequest
from swift.common.utils import public
import operator
import json
import copy
import urllib
import os
import re
mappings = {'>': operator.gt, '>=': operator.ge,
'==': operator.eq, '<=': operator.le, '<': operator.lt,
'!=': operator.ne, "OR": operator.or_, "AND": operator.and_}
class CrystalProxyHandler(CrystalBaseHandler):
def __init__(self, request, conf, app, logger):
super(CrystalProxyHandler, self).__init__(request, conf,
app, logger)
self.etag = None
self.filter_exec_list = None
def _get_dynamic_filters(self):
# Dynamic binding of policies: using a Lua script that executes
# a hgetall on the first matching key of a list and also returns
# the global filters
lua_sha = self.conf.get('LUA_get_pipeline_sha')
args = (self.account.replace('AUTH_', ''), '' if self.container is None else self.container)
redis_list = self.redis.evalsha(lua_sha, 0, *args)
index = redis_list.index("@@@@") # Separator between pipeline and global filters
self.filter_list = dict(zip(redis_list[0:index:2], redis_list[1:index:2]))
self.global_filters = dict(zip(redis_list[index+1::2], redis_list[index+2::2]))
self.proxy_filter_exec_list = {}
self.object_filter_exec_list = {}
if self.global_filters or self.filter_list:
self.proxy_filter_exec_list = self._build_filter_execution_list('proxy')
self.object_filter_exec_list = self._build_filter_execution_list('object')
def _parse_vaco(self):
return self.request.split_path(2, 4, rest_with_last=True)
def handle_request(self):
if self.is_crystal_valid_request and hasattr(self, self.request.method):
try:
self._get_dynamic_filters()
handler = getattr(self, self.request.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
return HTTPMethodNotAllowed(request=self.request)
return handler()
else:
self.logger.info('Request disabled for Crystal')
return self.request.get_response(self.app)
def _check_conditions(self, filter_metadata):
"""
        This method checks the object_tag, object_type and object_size parameters
        introduced by the dashboard to run the filter.
"""
if not filter_metadata['object_type'] and \
not filter_metadata['object_tag'] and \
not filter_metadata['object_size']:
return True
metadata = {}
if self.method == 'put':
for key in self.request.headers.keys():
metadata[key.lower()] = self.request.headers.get(key)
else:
sub_req = make_subrequest(self.request.environ, method='HEAD',
path=self.request.path_info,
headers=self.request.headers,
swift_source='Crystal Filter Middleware')
resp = sub_req.get_response(self.app)
metadata = resp.headers
correct_type = True
correct_size = True
correct_tags = True
try:
if filter_metadata['object_type']:
object_name = filter_metadata['object_name']
filename = self.request.environ['PATH_INFO']
pattern = re.compile(object_name)
if not pattern.search(filename):
correct_type = False
if filter_metadata['object_tag']:
tags = filter_metadata['object_tag'].split(',')
tag_checking = list()
for tag in tags:
key, value = tag.split(':')
meta_key = ('X-Object-Meta-'+key).lower()
sysmeta_key = ('X-Object-Sysmeta-Meta-'+key).lower()
correct_tag = (meta_key in metadata and
metadata[meta_key] == value) or \
(sysmeta_key in metadata and
metadata[sysmeta_key] == value)
tag_checking.append(correct_tag)
correct_tags = all(tag_checking)
if filter_metadata['object_size']:
object_size = filter_metadata['object_size']
op = mappings[object_size[0]]
obj_lenght = int(object_size[1])
correct_size = op(int(metadata['Content-Length']),
obj_lenght)
except Exception as e:
self.logger.error(str(e))
return False
return correct_type and correct_size and correct_tags
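    # --- Editor's note: illustrative example, not part of the original module. ---
    # A sketch of the filter_metadata fields inspected by _check_conditions above.
    # Every value shown is hypothetical; 'object_size' is an (operator, size) pair
    # evaluated against Content-Length through the module-level `mappings` table.
    #
    # example_filter_metadata = {
    #     'object_type': True,
    #     'object_name': r'\.docx$',        # regex matched against the object path
    #     'object_tag': 'project:crystal',  # X-Object-Meta-project == 'crystal'
    #     'object_size': ['>', '1048576'],  # only objects larger than 1 MiB
    # }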
def _parse_filter_metadata(self, filter_metadata):
"""
This method parses the filter metadata
"""
filter_name = filter_metadata['filter_name']
language = filter_metadata["language"]
params = filter_metadata["params"]
filter_type = filter_metadata["filter_type"]
filter_main = filter_metadata["main"]
filter_dep = filter_metadata["dependencies"]
filter_size = filter_metadata["content_length"]
reverse = filter_metadata["reverse"]
filter_data = {'name': filter_name,
'language': language,
'params': self._parse_csv_params(params),
'reverse': reverse,
'type': filter_type,
'main': filter_main,
'dependencies': filter_dep,
'size': filter_size}
return filter_data
def _build_filter_execution_list(self, server):
"""
This method builds the filter execution list (ordered).
"""
filter_execution_list = {}
''' Parse global filters '''
for _, filter_metadata in self.global_filters.items():
filter_metadata = json.loads(filter_metadata)
if self.method in filter_metadata and filter_metadata[self.method] \
and filter_metadata['execution_server'] == server \
and self._check_conditions(filter_metadata):
filter_data = self._parse_filter_metadata(filter_metadata)
order = filter_metadata["execution_order"]
filter_execution_list[int(order)] = filter_data
''' Parse Project specific filters'''
for _, filter_metadata in self.filter_list.items():
filter_metadata = json.loads(filter_metadata)
if self.method in filter_metadata and filter_metadata[self.method] \
and filter_metadata['execution_server'] == server \
and self._check_conditions(filter_metadata):
filter_data = self._parse_filter_metadata(filter_metadata)
order = filter_metadata["execution_order"]
filter_execution_list[order] = filter_data
return filter_execution_list
def _format_crystal_metadata(self, filter_list):
"""
This method generates the metadata that will be stored alongside the
object in the PUT requests. It allows the reverse case of the filters
without querying the centralized controller.
"""
for key in filter_list.keys():
cfilter = filter_list[key]
if cfilter['reverse'] != 'False':
current_params = cfilter['params']
if current_params:
cfilter['params']['reverse'] = 'True'
else:
cfilter['params'] = {'reverse': 'True'}
cfilter['execution_server'] = cfilter['reverse']
cfilter.pop('reverse')
else:
filter_list.pop(key)
return filter_list
def _set_crystal_metadata(self):
"""
This method generates the metadata that will be stored al
|
sc68cal/neutron-classifier
|
neutron_classifier/db/models.py
|
Python
|
apache-2.0
| 5,550
| 0
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_classifier.common import constants
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
Base = declarative_base()
# Stolen from neutron/db/model_base.py
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
tenant_id = sa.Column(sa.String(255), index=True)
# Stolen from neutron/db/model_base.py
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
class Classifier(Base, HasId):
__tablename__ = 'classifiers'
classifier_type = sa.Column(sa.String)
__mapper_args__ = {'polymorphic_on': classifier_type}
class ClassifierGroup(Base, HasTenant, HasId):
__tablename__ = 'classifier_groups'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
classifier_chain = orm.relationship(
'ClassifierChainEntry',
backref=orm.backref('classifier_chains', cascade='all, delete'),
order_by='ClassifierChainEntry.sequence',
collection_class=ordering_list('sequence', count_from=1))
service = sa.Column(sa.Enum(*constants.NEUTRON_SERVICES), index=True)
class ClassifierChainEntry(Base, HasId):
__tablename__ = 'classifier_chains'
classifier_group_id = sa.Column(sa.String(36),
sa.ForeignKey('classifier_groups.id',
ondelete="CASCADE"))
classifier_id = sa.Column(sa.String(36),
sa.ForeignKey('classifiers.id',
ondelete="CASCADE"))
classifier = orm.relationship(Classifier)
sequence = sa.Column(sa.Integer)
classifier_group = orm.relationship(ClassifierGroup)
class IpClassifier(Classifier):
__tablename__ = 'ip_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ipclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
source_ip_prefix = sa.Column(sa.String(255))
destination_ip_prefix = sa.Column(sa.String(255))
class Ipv4Classifier(Classifier):
__tablename__ = 'ipv4_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ipv4classifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
dscp_tag = sa.Column(sa.String(255))
protocol = sa.Enum(*constants.PROTOCOLS)
dscp_mask = sa.Column(sa.String(255))
class Ipv6Classifier(Classifier):
__tablename__ = 'ipv6_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ipv6classifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
next_header = sa.Enum(*constants.PROTOCOLS)
traffic_class = sa.Column(sa.String(255))
flow_label = sa.Column(sa.String(255))
class TransportClassifier(Classifier):
__tablename__ = 'transport_classifiers'
__mapper_args__ = {'polymorphic_identity': 'transportclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
source_port_range_max = sa.Column(sa.Integer)
source_port_range_min = sa.Column(sa.Integer)
destination_port_range_max = sa.Column(sa.Integer)
destination_port_range_min = sa.Column(sa.Integer)
class EthernetClassifier(Classifier):
__tablename__ = 'ethernet_classifiers'
__mapper_args__ = {'polymorphic_identity': 'ethernetclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
ethertype = sa.Column(sa.String(255))
source_mac = sa.Column(sa.String(255))
destination_mac = sa.Column(sa.String(255))
class VlanClassifier(Classifier):
__tablename__ = 'vlan_classifiers'
__mapper_args__ = {'polymorphic_identity': 'vlanclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
vlan_priority = sa.Column(sa.Integer)
class EncapsulationClassifier(Classifier):
__tablename__ = 'encapsulation_classifiers'
__mapper_args__ = {'polymorphic_identity': 'encapsulationclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
encapsulation_type = sa.Column(sa.Enum(*constants.ENCAPSULATION_TYPES))
encapsulation_id = sa.Column(sa.String(255))
class NeutronPortClassifier(Classifier):
__tablename__ = 'neutron_port_classifiers'
__mapper_args__ = {'polymorphic_identity': 'neutronportclassifier'}
id = sa.Column(sa.String(36), sa.ForeignKey('classifiers.id'),
primary_key=True)
logical_source_port = sa.Column(sa.String(255))
logical_destination_port = sa.Column(sa.String(255))
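# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# A minimal example of wiring the models above together with an in-memory SQLite
# database. The names and port values are hypothetical; a real deployment would
# also set `service` to one of constants.NEUTRON_SERVICES.
def example_build_chain():
    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = orm.Session(bind=engine)
    group = ClassifierGroup(name='web-traffic', description='HTTP only')
    tcp_80 = TransportClassifier(destination_port_range_min=80,
                                 destination_port_range_max=80)
    # ordering_list assigns the chain entry's sequence automatically.
    group.classifier_chain.append(ClassifierChainEntry(classifier=tcp_80))
    session.add(group)
    session.commit()
    return group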
|
MichaelDoyle/Diamond
|
src/collectors/amavis/amavis.py
|
Python
|
mit
| 3,732
| 0
|
# coding=utf-8
"""
Collector that reports amavis metrics as reported by amavisd-agent
#### Dependencies
* amavisd-agent must be present in PATH
"""
import os
import subprocess
import re
import diamond.collector
import diamond.convertor
from diamond.collector import str_to_bool
class AmavisCollector(diamond.collector.Collector):
# From the source of amavisd-agent and it seems like the three interesting
# formats are these: ("x y/h", "xMB yMB/h", "x s y s/msg"),
# so this, ugly as it is to hardcode it this way, it should be right.
#
# The other option would be to directly read and decode amavis' berkeley
# db, and I don't even want to get there
matchers = [
re.compile(r'^\s*(?P<name>[\w]+)\s+(?P<time>[\d]+) s\s+'
r'(?P<frequency>[\d.]+) s/msg\s+\([\w]+\)\s*$'),
re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<count>[\d]+)\s+'
r'(?P<frequency>[\d.]+)/h\s+(?P<percentage>[\d.]+) %'
r'\s\([\w]+\)\s*$'),
re.compile(r'^\s*(?P<name>[\w.-]+)\s+(?P<size>[\d]+)MB\s+'
r'(?P<frequency>[\d.]+)MB/h\s+(?P<percentage>[\d.]+) %'
r'\s\([\w]+\)\s*$'),
]
def get_default_config_help(self):
config_help = super(AmavisCollector, self).get_default_config_help()
config_help.update({
'amavisd_exe': 'The path to amavisd-agent',
'use_sudo': 'Call amavisd-agent using sudo',
'sudo_exe': 'The path to sudo',
'sudo_user': 'The user to use if using sudo',
})
return config_help
def get_default_config(self):
config = super(AmavisCollector, self).get_default_config()
config.update({
'path': 'amavis',
'amavisd_exe': '/usr/sbin/amavisd-agent',
'use_sudo': False,
'sudo_exe': '/usr/bin/sudo',
'sudo_user': 'amavis',
})
return config
def collect(self):
"""
Collect memory stats
"""
try:
if str_to_bool(self.config['use_sudo']):
# Use -u instead of --user as the former is more portable. Not
# all versions of sudo support the long form --user.
cmdline = [
self.config['sudo_exe'], '-u', self.config['sudo_user'],
'--', self.config['amavisd_exe'], '-c', '1'
]
else:
cmdline = [self.config['amavisd_exe'], '-c', '1']
agent = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
agent_out = agent.communicate()[0]
lines = agent_out.strip().split(os.linesep)
for line in lines:
for rex in self.matchers:
res = rex.match(line)
if res:
groups = res.groupdict()
name = groups['name']
for metric, value in groups.items():
if metric == 'name':
continue
mtype = 'GAUGE'
precision = 2
if metric in ('count', 'time'):
mtype = 'COUNTER'
precision = 0
self.publish("{}.{}".format(name, metric),
|
value, metric_type=mtype,
|
precision=precision)
except OSError as err:
self.log.error("Could not run %s: %s",
self.config['amavisd_exe'],
err)
return None
return True
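# --- Editor's note: illustrative example, not part of the original collector. ---
# A quick sketch of the three line formats the matchers above are written for.
# The sample lines are invented; only their shape follows the class comment.
def example_match():
    sample_lines = [
        "  TimeElapsedTotal      123 s   1.23 s/msg (sysUpTime)",
        "  ContentCleanMsgs       42     7.0/h   85.7 % (total)",
        "  InSizeMB               12MB   2.0MB/h  60.0 % (total)",
    ]
    results = []
    for line in sample_lines:
        for rex in AmavisCollector.matchers:
            res = rex.match(line)
            if res:
                results.append(res.groupdict())
    return results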
|
dhocker/athomepowerlineserver
|
helpers/sun_data.py
|
Python
|
gpl-3.0
| 3,610
| 0.000831
|
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright (C) 2014, 2015 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
from datetime import datetime, timedelta
from astral import LocationInfo
from astral.sun import sun
from astral.geocoder import database, lookup
from Configuration import Configuration
def get_astral_data(for_datetime):
'''
Returns the sunrise and sunset times for the given date.
Uses the Astral package to compute sunrise/sunset for the
configured city.
Reference https://astral.readthedocs.io/en/latest/index.html
:param for_datetime: The date for the astral data
:return: Returns a dict containing the keys sunrise and sunset.
The values are datetime objects.
'''
city = None
# Either city/name or latitude and longitude are required
if Configuration.City() != "":
db = database()
try:
city = lookup(Configuration.City(), db)
# Overrides
if Configuration.Latitude() != "":
city.latitude = float(Configuration.Latitude())
if Configuration.Longitude() != "":
city.longitude = float(Configuration.Longitude())
except KeyError:
pass
if city is None:
# Default if no city is configured
city = LocationInfo()
# We expect latitude and longitude to be configured to override city
if Configuration.Latitude() != "" and Configuration.Longitude() != "":
city.latitude = float(Configuration.Latitude())
city.longitude = float(Configuration.Longitude())
else:
raise ValueError("Latitude and longitude are required")
# region is not used
# city.region = ""
# Local timezone
city.timezone = datetime.now().astimezone().tzinfo
return sun(city.observer, date=for_datetime, tzinfo=city.timezone)
def get_sun_data(for_datetime):
'''
Returns the sunrise and sunset times for the given date.
Uses the Astral package to compute sunrise/sunset for the
configured city.
    Reference https://pythonhosted.org/astral/module.html
:param for_datetime:
:return: Returns a dict containing the keys sunrise and sunset.
'''
sun_data = get_astral_data(for_datetime)
    sun_data_response = {}
sun_data_response["sunrise"] = sun_data["sunrise"].isoformat()
sun_data_response["sunset"] = sun_data["sunset"].isoformat()
return sun_data_response
def round_to_minute(time_to_round):
round_adj = 0
rounded = datetime(time_to_round.year, time_to_round.month, time_to_round.day,
hour=time_to_round.hour, minute=time_to_round.minute, second=0, microsecond=0,
tzinfo=time_to_round.tzinfo)
if time_to_round.second >= 30:
round_adj = timedelta(minutes=1)
rounded = rounded + round_adj
return rounded
def get_sunrise(for_datetime):
"""
Return the sunrise time for a given date/time
"""
sun_data = get_astral_data(for_datetime)
# Returns a datetime instance in local time
return round_to_minute(sun_data["sunrise"])
def get_sunset(for_datetime):
"""
Return the sunset time for a given date/time
"""
sun_data = get_astral_data(for_datetime)
# Returns a datetime instance in local time
return round_to_minute(sun_data["sunset"])
|
andrius-preimantas/odoo
|
addons/base_action_rule/base_action_rule.py
|
Python
|
agpl-3.0
| 15,745
| 0.005017
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
import logging
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
DATE_RANGE_FUNCTION = {
'minutes': lambda interval: timedelta(minutes=interval),
'hour': lambda interval: timedelta(hours=interval),
'day': lambda interval: timedelta(days=interval),
'month': lambda interval: timedelta(months=interval),
False: lambda interval: timedelta(0),
}
def get_datetime(date_str):
'''Return a datetime from a date string or a datetime string'''
# complete date time if date_str contains only a date
if ' ' not in date_str:
date_str = date_str + " 00:00:00"
return datetime.strptime(date_str, DEFAULT_SERVER_DATETIME_FORMAT)
class base_action_rule(osv.osv):
""" Base Action Rules """
_name = 'base.action.rule'
_description = 'Action Rules'
_order = 'sequence'
_columns = {
'name': fields.char('Rule Name', required=True),
'model_id': fields.many2one('ir.model', 'Related Document Model',
required=True, domain=[('osv_memory', '=', False)]),
'model': fields.related('model_id', 'model', type="char", string='Model'),
'create_date': fields.datetime('Create Date', readonly=1),
'active': fields.boolean('Active',
help="When unchecked, the rule is hidden and will not be executed."),
'sequence': fields.integer('Sequence',
help="Gives the sequence order when displaying a list of rules."),
'kind': fields.selection(
[('on_create', 'On Creation'),
('on_write', 'On Update'),
('on_create_or_write', 'On Creation & Update'),
('on_time', 'Based on Timed Condition')],
string='When to Run'),
'trg_date_id': fields.many2one('ir.model.fields', string='Trigger Date',
help="When should the condition be triggered. If present, will be checked by the scheduler. If empty, will be checked at creation and update.",
domain="[('model_id', '=', model_id), ('ttype', 'in', ('date', 'datetime'))]"),
'trg_date_range': fields.integer('Delay after trigger date',
help="Delay after the trigger date." \
"You can put a negative number if you need a delay before the" \
"trigger date, like sending a reminder 15 minutes before a meeting."),
'trg_date_range_type': fields.selection([('minutes', 'Minutes'), ('hour', 'Hours'),
('day', 'Days'), ('month', 'Months')], 'Delay type'),
'trg_date_calendar_id': fields.many2one(
'resource.calendar', 'Use Calendar',
help='When calculating a day-based timed condition, it is possible to use a calendar to compute the date based on working days.',
ondelete='set null',
),
'act_user_id': fields.many2one('res.users', 'Set Responsible'),
'act_followers': fields.many2many("res.partner", string="Add Followers"),
'server_action_ids': fields.many2many('ir.actions.server', string='Server Actions',
domain="[('model_id', '=', model_id)]",
help="Examples: email reminders, call object service, etc."),
'filter_pre_id': fields.many2one('ir.filters', string='Before Update Filter',
ondelete='restrict',
domain="[('model_id', '=', model_id.model)]",
help="If present, this condition must be satisfied before the update of the record."),
'filter_id': fields.many2one('ir.filters', string='Filter',
ondelete='restrict',
domain="[('model_id', '=', model_id.model)]",
help="If present, this condition must be satisfied before executing the action rule."),
'last_run': fields.datetime('Last Run', readonly=1, copy=False),
}
_defaults = {
'active': True,
'trg_date_range_type': 'day',
}
def onchange_kind(self, cr, uid, ids, kind, context=None):
clear_fields = []
if kind in ['on_create', 'on_create_or_write']:
clear_fields = ['filter_pre_id', 'trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind in ['on_write', 'on_create_or_write']:
clear_fields = ['trg_date_id', 'trg_date_range', 'trg_date_range_type']
elif kind == 'on_time':
clear_fields = ['filter_pre_id']
return {'value': dict.fromkeys(clear_fields, False)}
def _filter(self, cr, uid, action, action_filter, record_ids, context=None):
""" filter the list record_ids that satisfy the action filter """
if record_ids and action_filter:
assert action.model == action_filter.model_id, "Filter model different from action rule model"
model = self.pool[action_filter.model_id]
domain = [('id', 'in', record_ids)] + eval(action_filter.domain)
ctx = dict(context or {})
ctx.update(eval(action_filter.context))
record_ids = model.search(cr, uid, domain, context=ctx)
return record_ids
def _process(self, cr, uid, action, record_ids, context=None):
""" process the given action on the records """
model = self.pool[action.model_id.model]
# modify records
values = {}
if 'date_action_last' in model._all_columns:
values['date_action_last'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if action.act_user_id and 'user_id' in model._all_columns:
values['user_id'] = action.act_user_id.id
if values:
model.write(cr, uid, record_ids, values, context=context)
if action.act_followers and hasattr(model, 'message_subscribe'):
follower_ids = map(int, action.act_followers)
model.message_subscribe(cr, uid, record_ids, follower_ids, context=context)
# execute server actions
if action.server_action_ids:
            server_action_ids = map(int, action.server_action_ids)
for record in model.browse(cr, uid, record_ids, context):
action_server_obj = self.pool.get('ir.actions.server')
ctx = dict(context, active_model=model._name, active_ids=[record.id], active_id=record.id)
action_server_obj.run(cr, uid, server_action_ids, context=ctx)
return True
def _register_hook(self, cr, ids=None):
""" Wrap the methods `create` and `write` of the models specified by
the rules given by `ids` (or all existing rules if `ids` is `None`.)
"""
updated = False
if ids is None:
ids = self.search(cr, SUPERUSER_ID, [])
for action_rule in self.browse(cr, SUPERUSER_ID, ids):
model = action_rule.model_id.model
model_obj = self.pool[model]
if not hasattr(model_obj, 'base_action_ruled'):
# monkey-patch methods create and write
def create(self, cr, uid, vals, context=None, **kwargs):
# avoid loops or cascading actions
if context and context.g
|
bennylope/django-firstclass
|
firstclass/south_migrations/0001_initial.py
|
Python
|
mit
| 1,013
| 0.00691
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Message'
db.create_table('firstclass_message', (
('key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
('data', self.gf('django.db.models.fields.TextField')(default='{}')),
))
db.send_create_signal('firstclass', ['Message'])
def backwards(self, orm):
# Deleting model 'Message'
db.delete_table('firstclass_message')
models = {
'firstclass.message': {
'Meta': {'object_name': 'Message'},
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'})
}
}
complete_apps = ['firstclass']
|
fwenzel/strassendeutsch
|
woerterbuch/__init__.py
|
Python
|
gpl-3.0
| 739
| 0.005413
|
from flask import Flask
from flaskext.cache import Cache
from flaskext.mongokit import BSONObjectIdConverter
from werkzeug.routing import BaseConverter
import settings
app = Flask(__name__)
app.config.from_object('woerterbuch.settings')
app.secret_key = settings.SECRET_KEY
## Hook up custom URL converters.
class RegexConverter(BaseConverter):
"""Regex-powered url converter."""
    def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters['regex'] = RegexConverter
app.url_map.converters['ObjectId'] = BSONObjectIdConverter
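## Editor's note: illustrative example, not part of the original module. A sketch
## of how the 'regex' converter registered above could be used in a view; the
## route and view function below are hypothetical.
# @app.route('/wort/<regex("[a-z]+"):word>')
# def example_word(word):
#     return word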
# Caching
cache = Cache(app)
# Templates
import woerterbuch.context_processors
# Views
import woerterbuch.views
|
tony-rasskazov/meteo
|
weewx/bin/weeutil/rsyncupload.py
|
Python
|
mit
| 6,592
| 0.005765
|
#
# Copyright (c) 2012 Will Page <compenguy@gmail.com>
# Derivative of ftpupload.py, credit to Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
# $Id: rsyncupload.py 2766 2014-12-02 02:45:36Z tkeffer $
#
"""For uploading files to a remove server via Rsync"""
import os
import errno
import sys
import subprocess
import syslog
import time
class RsyncUpload(object):
"""Uploads a directory and all its descendants to a remote server.
Keeps track of what files have changed, and only updates changed files."""
def __init__(self, local_root, remote_root,
server, user=None, delete=False, port=None,
ssh_options=None, compress=False, log_success=True):
"""Initialize an instance of RsyncUpload.
After initializing, call method run() to perform the upload.
server: The remote server to which the files are to be uploaded.
user: The user name that is to be used. [Optional, maybe]
delete: delete remote files that don't match with local files. Use
with caution. [Optional. Default is False.]
"""
self.local_root = os.path.normpath(local_root)
self.remote_root = os.path.normpath(remote_root)
self.server = server
self.user = user
self.delete = delete
self.port = port
self.ssh_options = ssh_options
self.compress = compress
self.log_success = log_success
def run(self):
"""Perform the actual upload."""
t1 = time.time()
# If the source path ends with a slash, rsync interprets
# that as a request to copy all the directory's *contents*,
# whereas if it doesn't, it copies the entire directory.
# We want the former, so make it end with a slash.
if self.local_root.endswith(os.sep):
rsynclocalspec = self.local_root
else:
rsynclocalspec = self.local_root + os.sep
if self.user is not None and len(self.user.strip()) > 0:
rsyncremotespec = "%s@%s:%s" % (self.user, self.server, self.remote_root)
else:
rsyncremotespec = "%s:%s" % (self.server, self.remote_root)
if self.port is not None and len(self.port.strip()) > 0:
rsyncsshstring = "ssh -p %s" % (self.port,)
else:
rsyncsshstring = "ssh"
if self.ssh_options is not None and len(self.ssh_options.strip()) > 0:
rsyncsshstring = rsyncsshstring + " " + self.ssh_options
cmd = ['rsync']
# archive means:
# recursive, copy symlinks as symlinks, preserve permissions,
# preserve modification times, preserve group and owner,
# preserve device files and special files, but not ACLs,
# no hardlinks, and no extended attributes
cmd.extend(["--archive"])
# provide some stats on the transfer
cmd.extend(["--stats"])
# Remove files remotely when they're removed locally
if self.delete:
cmd.extend(["--delete"])
if self.compress:
cmd.extend(["--compress"])
cmd.extend(["-e %s" % rsyncsshstring])
cmd.extend([rsynclocalspec])
cmd.extend([rsyncremotespec])
try:
rsynccmd = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = rsynccmd.communicate()[0]
stroutput = stdout.encode("utf-8").strip()
except OSError, e:
if e.errno == errno.ENOENT:
syslog.syslog(syslog.LOG_ERR, "rsyncupload: rsync does not appear to be installe
|
d on this system. (errno %d, \"%s\")" % (e.errno, e.strerror))
raise
# we have some output from rsync so generate an appropriate message
if stroutput.find('rsync error:') < 0:
# no rsync error message so parse rsync --stats results
rsyncinfo = {}
for line in iter(stroutput.splitlines()):
if line.find(':') >= 0:
(n,v) = line.split(':', 1)
rsyncinfo[n.strip()] = v.strip()
# get number of files and bytes transferred and produce an
# appropriate message
try:
if 'Number of regular files transferred' in rsyncinfo:
N = rsyncinfo['Number of regular files transferred']
else:
N = rsyncinfo['Number of files transferred']
Nbytes = rsyncinfo['Total transferred file size']
if N is not None and Nbytes is not None:
rsync_message = "rsync'd %d files (%s) in %%0.2f seconds" % (int(N), Nbytes)
else:
rsync_message = "rsync executed in %0.2f seconds"
except:
rsync_message = "rsync executed in %0.2f seconds"
else:
# suspect we have an rsync error so tidy stroutput
# and display a message
stroutput = stroutput.replace("\n", ". ")
stroutput = stroutput.replace("\r", "")
syslog.syslog(syslog.LOG_ERR, "rsyncupload: [%s] reported errors: %s" % (cmd, stroutput))
rsync_message = "rsync executed in %0.2f seconds"
t2= time.time()
if self.log_success:
syslog.syslog(syslog.LOG_INFO, "rsyncupload: " + rsync_message % (t2-t1))
if __name__ == '__main__':
import weewx
import configobj
weewx.debug = 1
syslog.openlog('rsyncupload', syslog.LOG_PID|syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
if len(sys.argv) < 2 :
print """Usage: rsyncupload.py path-to-configuration-file [path-to-be-rsync'd]"""
sys.exit(weewx.CMD_ERROR)
try :
config_dict = configobj.ConfigObj(sys.argv[1], file_error=True)
except IOError:
print "Unable to open configuration file ", sys.argv[1]
raise
if len(sys.argv) == 2:
try:
rsync_dir = os.path.join(config_dict['WEEWX_ROOT'],
config_dict['StdReport']['HTML_ROOT'])
except KeyError:
print "No HTML_ROOT in configuration dictionary."
sys.exit(1)
else:
rsync_dir = sys.argv[2]
rsync_upload = RsyncUpload(
rsync_dir,
**config_dict['StdReport']['RSYNC'])
rsync_upload.run()
|
giuva90/TreeBot
|
bot.py
|
Python
|
gpl-3.0
| 11,462
| 0.022596
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import configparser
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, RegexHandler, ConversationHandler
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from copy import deepcopy
import logging
import logging.handlers
from decisionTreeSupport import init, convert, getClassName
import xml.etree.ElementTree as ET
tree = ET.parse('config.xml.localSafeCopy')
root = tree.getroot()
Telegram_BOTID = root.find('telegramBotID').text
AdminPassword = root.find('adminPassword').text
datasets = {}
for ds in root.findall('dataset'):
name = ds.get('name')
datasets[name] = {}
datasets[name]['dataset_name'] = ds.find('filename').text
datasets[name]['class_column'] = int(ds.find('classColumn').text)
datasets[name]['data_columns'] = [int(x) for x in ds.find('dataColumns').text.split(',')]
if ds.find('successorOf') is not None:
datasets[name]['successorOf'] = ds.find('successorOf').text
datasets[name]['previousExitClass'] = ds.find('previousExitClass').text
del tree, root
CHOOSINGTREE, INTERACT = range(2)
LOG_FILENAME = 'logs.log'
treeData = {}
availableClassifierName = []
logging.basicConfig(filename=LOG_FILENAME, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.addHandler(logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=20000000, backupCount=5))
from botFunctions import *
def start(bot, update):
user = update.message.from_user
logger.debug("User %s typed /start." % user.name)
message = "Ciao, e benvenuto!"
message += "\nSono ancora in sviluppo, ecco la lista dei comandi attualmente disponibili:" \
"\n/exploretree Inizia ad esplorare gli alberi" \
"\n/help mostra la lista dei comandi disponibili"
bot.send_message(chat_id=update.message.chat_id, text=message)
def startInteraction(bot, update, chat_data):
user = update.message.from_user
logger.debug("User %s is starting the interaction." % user.name)
chat_data = {}
reply_keyboard = []
for k in availableClassifierName:
if 'isSuccessors' not in treeData[k]:
reply_keyboard.append([k])
reply_keyboard.append(['/cancel'])
update.message.reply_text('Ciao, scegli cosa vuoi che indovini.\n\n /cancel se vuoi terminare! ',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return INTERACT
def interactionManager(bot, update, chat_data):
chose = update.message.text
if chose in treeData:
chat_data['chose'] = chose
return interact(bot, update, chat_data, chose)
elif 'chose' in chat_data:
return interact(bot, update, chat_data, chat_data['chose'])
else:
bot.send_message(chat_id=update.message.chat_id, text="Scusa, ma non credo di disporre di questo dato...")
return startInteraction(bot, update, chat_data)
def interact(bot, update, chat_data, chose):
# Retrieve the data dictionary for tree interactionManager
if chose in chat_data:
data = chat_data[chose]
else:
data = deepcopy(treeData[chose])
chat_data[chose] = data
chat_data['step'] = 1 # 1 = ask question, 0 = process answer
if 'conversationHistory' not in chat_data:
chat_data['conversationHistory'] = {}
dt = treeData['dt' + chose]
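    # Walk the decision tree node by node: on step 1 ask the user the question for
    # the current feature, on step 0 feed the answer back into the classifier.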
while not data['__stop']:
toAsk = data['toAsk']
if data['step'] == 1:
if 'isSuccessors' in data and toAsk['feature'] in chat_data['conversationHistory']:
chat_data['step'] = 0
update.message.text = str(chat_data['conversationHistory'][toAsk['feature']])
if 'valueRange' in toAsk:
# IF the feature has numeric value within an interval:
if chat_data['step']:
question = data['questions'][toAsk['feature']] + "Range: " + str(toAsk['valueRange'])
update.message.reply_text(question, reply_markup=ReplyKeyboardRemove())
chat_data['step'] = 0
return INTERACT
else:
user_value_for_feature = convert(update.message.text.strip())
if toAsk['valueRange'][0] <= user_value_for_feature <= toAsk['valueRange'][1]:
chat_data['conversationHistory'][toAsk['feature']] = user_value_for_feature
data['step'] = 0
data['s'][toAsk['feature']] = user_value_for_feature
data = dt.classify_by_asking_questions(data['actualNode'], data)
chat_data['step'] = 1
else:
question = data['questions'][toAsk['feature']] + "Range: " + str(toAsk['valueRange'])
update.message.reply_text(question, reply_markup=ReplyKeyboardRemove())
return INTERACT
elif 'possibleAnswer' in toAsk:
# If the features has a symbolic value
if chat_data['step']:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
reply_keyboard = [[str(x) for x in data['featuresHumanization'][toAsk['feature']]]]
else:
reply_keyboard = [[str(x) for x in toAsk['possibleAnswer']]]
update.message.reply_text(data['questions'][toAsk['feature']],
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
chat_data['step'] = 0
return INTERACT
else:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
user_value_for_feature = convert(
data['featuresHumanization'][toAsk['feature']][update.message.text.strip()])
else:
user_value_for_feature = convert(update.message.text.strip())
if user_value_for_feature in toAsk['possibleAnswer']:
chat_data['conversationHistory'][toAsk['feature']] = user_value_for_feature
data['step'] = 0
data['toAsk']['givenAnswer'] = user_value_for_feature
data = dt.classify_by_asking_questions(data['actualNode'], data)
chat_data['step'] = 1
else:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
reply_keyboard = [[str(x) for x in data['featuresHumanization'][toAsk['feature']]]]
else:
reply_keyboard = [[str(x) for x in toAsk['possibleAnswer']]]
update.message.reply_text("Valore non valido!\n" + data['questions'][toAsk['feature']],
reply_markup=ReplyKeyboardMarkup(reply_keyboard,
one_time_keyboard=True))
return INTERACT
else:
logger.critical("Sono finito in uno stato morto...")
logger.critical("Albero: " + chat_data[chose])
logger.critical("Conversation Detal: \n" + str(chat_data['conversationHistory']))
del chat_data[chose], data, chat_data['chose'], chat_data['conversationHistory']
update.message.reply_text(
"Perdona, mi sono rotto un braccio! devo scappare in ospedale :("
"\nTi lascio con mio fratello, ma devi ricominciare.",
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
# update.message.reply_text("Ottimo! Ho trovato qualcosa!\n")
message = ""
classification = data['a']
del classification['solution_path']
which_classes = list(classification.keys())
which_classes = sorted(which_classes, key=lambda x: classification[x], reverse=True)
if classification[which_classes[0]] < 1:
message += "\nEcco la probabilità delle risposte, io sceglierei la prima ;)\n"
message += "\n " + str.ljust("Classe", 30) + "Probabilità"
message += "\n ---------- -----------"
for which_class in which_classes:
            if which_class != 'solution_path' and classification[which_class] > 0:
message += "\n " + str.ljust(getClassName(which_class), 30) + str(
round(classification[which_class], 2))
else:
if 'singleAnswer' in data['interaction']:
message += data['interaction']['singleAnswer'] + '\n'
else:
message += "\n\nSai cosa?, sono quasi sicuro che la risposta corretta sia "
if str(which_classes[0][5:]) in data['classHumanization']:
                message += getClassName(data['classHumanization'][str(which_classes[0][5:])])
else:
message += getClassName(str(which_classes[0]))
# handling of connection among tree
if 'hasSuccessors' in data:
update.message.reply_text("Credo di essere sulla buona strada...\n")
        chat_data['chose'] = data['successorsMap'][getClassName(which_classes[0])]
d
|
ahmadshahwan/cohorte-runtime
|
python/cohorte/config/parser.py
|
Python
|
apache-2.0
| 14,427
| 0
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
COHORTE configuration file parser: converts a parsed configuration file to
beans
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python standard library
import collections
import logging
import uuid
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Instantiate, \
Requires
# COHORTE constants
import cohorte
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Component to be instantiated
Component = collections.namedtuple(
'Component', ('factory', 'name', 'properties'))
# Bundle to be installed
Bundle = collections.namedtuple(
'Bundle', ('name', 'filename', 'properties', 'version', 'optional'))
# Simplest configuration possible
BootConfiguration = collections.namedtuple(
'BootConfiguration', ('bundles', 'composition', 'properties',
'environment', 'boot_args'))
# Boot configuration + Isolate basic description
Isolate = collections.namedtuple(
'Isolate', BootConfiguration._fields + ('name', 'kind', 'node',
'level', 'sublevel'))
def _recursive_namedtuple_convert(data):
"""
Recursively converts the named tuples in the given object to dictionaries
:param data: An object in a named tuple or its children
:return: The converted object
"""
if isinstance(data, list):
# List
return [_recursive_namedtuple_convert(item) for item in data]
elif hasattr(data, '_asdict'):
# Named tuple
dict_value = dict(data._asdict())
for key, value in dict_value.items():
dict_value[key] = _recursive_namedtuple_convert(value)
return dict_value
else:
# Standard object
return data
# ------------------------------------------------------------------------------
@ComponentFactory('cohorte-config-parser-factory')
@Provides(cohorte.SERVICE_CONFIGURATION_READER)
@Requires('_reader', cohorte.SERVICE_FILE_READER)
@Instantiate('cohorte-config-parser')
class BootConfigParser(object):
"""
Boot configuration parser
"""
def __init__(self):
"""
Sets up the members
"""
# File reader
self._reader = None
# Loaded isolates configurations
self._isolates = None
@staticmethod
def _parse_bundle(json_object):
"""
Reads the given JSON object and returns its Bundle representation
:param json_object: A parsed JSON object
:return: A Bundle object
:raise KeyError: A mandatory parameter is missing
"""
# Use a copy of the properties
properties = {}
json_properties = json_object.get('properties')
if json_properties:
properties.update(json_properties)
return Bundle(name=json_object['name'],
filename=json_object.get('file'),
properties=properties,
version=json_object.get('version'),
optional=json_object.get('optional', False))
def _parse_bundles(self, bundles):
"""
Parses the bundles in the given list. Returns an empty list if the
given one is None or empty.
:param bundles: A list of bundles representations
:return: A list of Bundle objects
        :raise KeyError: A mandatory parameter is missing
"""
if not bundles:
return []
return [self._parse_bundle(bundle) for bundle in bundles]
@staticmethod
def _parse_component(json_object):
"""
|
Reads the given JSON object and returns its Component representation
:param json_object: A parsed JSON object
:return: A Component object
:raise KeyError: A mandatory parameter is missing
"""
# Mandatory values
factory = json_object['factory']
# Computed name (if needed)
name = json_object.get('name', factory + '-instance')
# Use a copy of the properties
properties = {}
json_properties = json_object.get('properties')
if json_properties:
properties.update(json_properties)
return Component(factory=factory, name=name, properties=properties)
def _parse_components(self, components):
"""
Parses the components in the given list. Returns an empty list if the
given one is None or empty.
:param components: A list of components representations
:return: A list of Component objects
:raise KeyError: A mandatory parameter is missing
"""
if not components:
return []
return [self._parse_component(component) for component in components]
def _parse_isolate(self, json_object):
"""
Reads the given JSON object and returns its Isolate representation
:param json_object: A parsed JSON object
:return: An Isolate object
:raise KeyError: A mandatory parameter is missing
"""
# Reuse the boot parser
boot_config = self.load_boot_dict(json_object)
return Isolate(name=json_object['name'],
kind=json_object['kind'],
level=json_object['level'],
sublevel=json_object['sublevel'],
# Reuse boot configuration values
**boot_config._asdict())
def _prepare_configuration(self, uid, name, kind,
bundles=None, composition=None,
base_configuration=None):
"""
Prepares and returns a configuration dictionary to be stored in the
configuration broker, to start an isolate of the given kind.
:param uid: The isolate UID
:param name: The isolate name
:param kind: The kind of isolate to boot
:param bundles: Extra bundles to install
:param composition: Extra components to instantiate
:param base_configuration: Base configuration (to override)
:return: A configuration dictionary
(updated base_configuration if given)
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
if isinstance(base_configuration, dict):
configuration = base_configuration
else:
configuration = {}
# Set up isolate properties
configuration['uid'] = uid \
or configuration.get('custom_uid') or str(uuid.uuid4())
configuration['name'] = name
configuration['kind'] = kind
# Boot configuration for this kind
new_boot = configuration.setdefault('boot', {})
new_boot.update(_recursive_namedtuple_convert(self.load_boot(kind)))
# Add bundles (or an empty list)
if bundles:
new_bundles = configuration.setdefault('bundles', [])
new_bundles.extend(_recursive_namedtuple_convert(
[self.normalize_bundle(bundle) for bundle in bundle
|
saymedia/seosuite
|
seoreporter/__init__.py
|
Python
|
mit
| 13,442
| 0.002232
|
# -*- coding: utf-8 -*-
# usage:
# > python seoreporter/__init__.py [type] [format] [run_id]
# example:
# > python seoreporter/__init__.py build junit d09b8571-5c8a-42ff-8ab7-c38f4f8871c4
# todo
# output valid jUnit XML output
# output html files in a folder
# output html pages that show the data
# output json
import yaml
import time
import datetime
import os
import MySQLdb
start = None
def report(db, report_type, report_format, run_id):
global start
report_data = []
start = time.time()
# print [report_type, report_format, run_id]
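    # Gather the rows for the requested report type, then hand them to the matching formatter below.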
if report_type == 'build':
report_data = build_report(db, run_id)
elif report_type == 'status_code':
report_data = status_code_report(db, run_id)
elif report_type == 'all':
report_data = all_report(db, run_id)
else:
raise Exception('Report type not supported')
if report_format == 'junit':
return junit_format(report_type, report_data, run_id)
elif report_format == 'csv':
return csv_format(report_type, report_data, run_id)
elif report_format == 'xls':
return xls_format(report_type, report_data, run_id)
elif report_format == 'sql':
return sql_format(report_type, report_data, run_id)
elif report_format == 'html_files':
return html_files_format(report_type, report_data, run_id)
else:
raise Exception('Report format not supported')
def fetch_latest_run_id(db):
run_id = None
c = db.cursor()
c.execute('SELECT run_id FROM crawl_urls ORDER BY timestamp DESC LIMIT 1')
result = c.fetchone()
if result:
run_id = result[0]
return run_id
def all_report(db, run_id):
c = db.cursor(MySQLdb.cursors.DictCursor)
c.execute('''SELECT
id, run_id, level, content_hash, address, domain, path, external,
status_code, status, body, size, address_length, encoding, content_type,
response_time, redirect_uri, canonical, title_1, title_length_1,
title_occurences_1, meta_description_1, meta_description_length_1,
meta_description_occurrences_1, h1_1, h1_length_1, h1_2, h1_length_2,
h1_count, meta_robots, rel_next, rel_prev, lint_critical, lint_error,
lint_warn, lint_info, lint_results, timestamp
FROM crawl_urls WHERE run_id = %s ORDER BY timestamp DESC''', [run_id])
return [{
'name': 'all',
'fields': [
'id', 'run_id', 'level', 'content_hash', 'address', 'domain', 'path', 'external',
'status_code', 'status', 'body', 'size', 'address_length', 'encoding', 'content_type',
'response_time', 'redirect_uri', 'canonical', 'title_1', 'title_length_1',
'title_occurences_1', 'meta_description_1', 'meta_description_length_1',
'meta_description_occurrences_1', 'h1_1', 'h1_length_1', 'h1_2', 'h1_length_2',
'h1_count', 'meta_robots', 'rel_next', 'rel_prev', 'lint_critical', 'lint_error',
'lint_warn', 'lint_info', 'lint_results', 'timestamp',
],
'values': c.fetchall(),
}]
def status_code_report(db, run_id):
output = []
# c = db.cursor()
c = db.cursor(MySQLdb.cursors.DictCursor)
# 500 errors
# TODO add other error codes
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND (status_code LIKE %s OR status_code = 0)
ORDER BY timestamp ASC''', (run_id, '5%',))
output.append({
'name': '5xx or 0 status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
# 404s
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND status_code LIKE %s
ORDER BY timestamp ASC''', (run_id, '4%',))
output.append({
'name': '4xx status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
return output
def build_report(db, run_id):
output = []
# c = db.cursor()
c = db.cursor(MySQLdb.cursors.DictCursor)
# 500 errors
# TODO add other error codes
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND (status_code LIKE %s OR status_code = 0)
ORDER BY timestamp ASC''', (run_id, '5%',))
output.append({
'name': '5xx or 0 status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
# 404s
c.execute('''SELECT address, timestamp, status_code FROM crawl_urls
WHERE run_id = %s AND external = 0 AND status_code LIKE %s
ORDER BY timestamp ASC''', (run_id, '4%',))
output.append({
'name': '4xx status codes',
'fields': ['address', 'timestamp', 'status_code'],
'values': c.fetchall(),
})
# missing canonicals
c.execute('''SELECT address, timestamp FROM crawl_urls
WHERE run_id = %s AND external = 0 AND content_type = 'text/html' AND canonical IS NULL
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'missing canonical',
'fields': ['address', 'timestamp'],
'values': c.fetchall(),
})
# missing titles
c.execute('''SELECT address, timestamp FROM crawl_urls
WHERE run_id = %s AND external = 0 AND content_type = 'text/html' AND title_1 IS NULL
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'missing title',
'fields': ['address', 'timestamp'],
'values': c.fetchall(),
})
# missing meta descriptions
c.execute('''SELECT address, timestamp FROM crawl_urls
WHERE run_id = %s AND external = 0 AND content_type = 'text/html' AND meta_description_1 IS NULL
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'missing meta_description',
'fields': ['address', 'timestamp'],
'values': c.fetchall(),
})
# lint level critical
c.execute('''SELECT address, timestamp, lint_critical FROM crawl_urls
WHERE run_id = %s AND external = 0 AND lint_critical > 0
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'lint level critical',
'fields': ['address', 'timestamp', 'lint_critical'],
'values': c.fetchall(),
})
# lint level error
c.execute('''SELECT address, timestamp, lint_error FROM crawl_urls
WHERE run_id = %s AND external = 0 AND lint_error > 0
ORDER BY timestamp ASC''', (run_id,))
output.append({
'name': 'lint level error',
'fields': ['address', 'timestamp', 'lint_error'],
'values': c.fetchall(),
})
return output
# junit schema:
# https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model\
# /src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd
def junit_format(report_type, tests, run_id):
global start
errors = 0
output = ''
def junit_row(values):
o = ''
for v in values:
o += '\t\t<error type="addresses">%s</error>\n' % str(v['address'])
return o
def junit_row_flat(values):
o = ''
# for v in values:
o += '\t\t<error type="addresses">%s</error>\n' % (", ".join([v['address'] for v in values]))
return o
for test in tests:
# header
output += '\t<testcase name="%s">\n' % (test['name'])
# values
if test['values'] and len(test['values']) > 0:
errors += len(test['values'])
# put everything in one element because jenkins ignores > 1
output += junit_row_flat(test['values'])
# footer
output += '\t</testcase>\n'
header = '''<?xml version="1.0" encoding="UTF-8"?>
<testsuite
name="seoreporter-%s"
tests="%s"
timestamp="%s"
time="%s"
errors="%s"
failures=""
id="%s"
package="seoreporter"
skipped="0">\n''' % (
report_type,
len(tests),
datetime.datetime.utcnow(),
time.time(
|
bmya/tkobr-addons
|
tko_point_of_sale_discount_cards/__openerp__.py
|
Python
|
agpl-3.0
| 2,070
| 0.004348
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Thinkopen Brasil
# Copyright (C) Thinkopen Solutions Brasil (<http://www.tkobr.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'tko_point_of_sale_discount_cards',
'version': '0.032',
'description': 'This module applies selected discount on total',
'category': 'Customizations',
'sequence': 150,
'complexity': 'pos_customization',
'author': 'ThinkOpen Solutions Brasil',
'website': 'http://www.tkobr.com',
'images': ['images/oerp61.jpeg',
],
'depends': [
'point_of_sale',
'tko_point_of_sale_discount_on_order',
],
'data': [
'security/ir.model.access.csv',
'point_of_sale_view.xml',
'static/src/xml/pos.xml',
],
'qweb' : ['static/src/xml/discount.xml',],
'init': [],
    'demo': [],
'update': [],
'test': [], #YAML files with tests
'installable': True,
'application': False,
'auto_install': False, #If it's True, the modules will be auto-installed when all dependencies are installed
'certificate': '',
}
|
sassoftware/robj
|
robj_test/robjtest/httptest.py
|
Python
|
apache-2.0
| 3,456
| 0.000579
|
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from robj.http import HTTPClient
from robj_test import robjhelp as testsuite
class HTTPClientTest(testsuite.TestCase):
def setUp(self):
testsuite.TestCase.setUp(self)
self.client = HTTPClient(self.server.geturi('/api/'), maxClients=1)
def testGET(self):
req = self.client.do_GET('/')
req.wait()
self.failUnlessEqual(self.getXML('/api'), req.response.content.read())
def testGETError(self):
req = self.client.do_GET('/foobar')
req.wait()
self.failUnlessEqual(req.response.status, 404)
def testPOST(self):
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/employees', employee1)
req.wait()
clientEmployee = req.response.content.read()
self.failUnlessEqual(clientEmployee, self.getXML('/api/employees/0'))
def testPOSTError(self):
raise testsuite.SkipTestException, ('disable until automated build '
'failures can be diagnosed')
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/', employee1)
req.wait()
self.failUnlessEqual(req.response.status, 501)
def testPUT(self):
# First post some data so that we can then update it.
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/employees', employee1)
req.wait()
xml = req.response.content.read()
# Change the employees name from Fred to Bob.
xml2 = xml.replace('Fred', 'Bob')
req2 = self.client.do_PUT('/employees/0', xml2)
req2.wait()
respxml = req2.response.content.read()
self.failUnlessEqual(xml2, respxml)
self.failUnlessEqual(respxml, self.getXML('/api/employees/0'))
def testPUTError(self):
raise testsuite.SkipTestException, ('disable until automated build '
'failures can be diagnosed')
req = self.client.do_GET('/')
req.wait()
xml = req.response.content.read()
xml2 = xml.replace('1.0', '2.0')
req2 = self.client.do_PUT('/', xml2)
req2.wait()
self.failUnlessEqual(req2.response.status, 501)
def testDELETE(self):
        # First post some data so that we can then update it.
employee1 = self.getArchiveContents('employee1.xml')
req = self.client.do_POST('/employees', employee1)
req.wait()
req2 = self.client.do_DELETE('/employees/0')
req2.wait()
self.failUnlessEqual(req2.response.status, 200)
req3 = self.client.do_DELETE('/employees/0')
req3.wait()
self.failUnlessEqual(req3.response.status, 404)
def testDELETEError(self):
req = self.client.do_DELETE('/')
req.wait()
self.failUnlessEqual(req.response.status, 501)
|
spjmurray/openstack-sentinel
|
sentinel/tests/functional/metering/v2/test_meters.py
|
Python
|
apache-2.0
| 1,567
| 0.005105
|
# Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sentinel.tests.functional import base
from sentinel.tests.functional import client_fixtures as fixtures
COMPUTE_CREATE_START_QUERY = [
{'field': 'event_type', 'op': 'eq', 'value': 'compute.instance.create.start'}
]
class MeteringV2MetersTestCase(base.BaseTestCase):
def test_meters_by_type(self):
grant = self.useFixture(fixtures.UserProjectGrant(self.sentinel))
client = base.FederatedUserClient(grant.user.entity, grant.project.entity)
server = self.useFixture(fixtures.Server(client))
samples = self.sentinel.metering.samples.list(meter_name='vcpus')
resources = [s.resource_id for s in samples]
self.assertIn(server.entity.id, resources)
#events = self.sentinel.metering.events.list(q=COMPUTE_CREATE_START_QUERY)
#instances = [t['value'] for e in events for t in e['traits'] if t['name'] == 'instance_id']
#self.assertIn(server.entity.id, instances)
# vi: ts=4 et:
|
ayushgoel/FixGoogleContacts
|
phonenumbers/data/region_878.py
|
Python
|
mit
| 1,847
| 0.009746
|
"""Auto-generated file, do not edit by hand. 878 metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_878 = PhoneMetadata(id='001', country_code=878, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{11}', possible_number_pattern='\\d{12}', example_number='101234567890'),
    fixed_line=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA', example_number='101234567890'),
mobile=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA', example_number='101234567890'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='10\\d{10}', possible_number_pattern='\\d{12}', example_number='101234567890'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_code=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{5})(\\d{5})', format='\\1 \\2 \\3')])
|
georgyberdyshev/ascend
|
models/johnpye/fprops/python/solve_ph_array.py
|
Python
|
gpl-2.0
| 2,054
| 0.041383
|
from fprops import *
from pylab import *
import sys
#P = fluid('water','helmholtz');
#P = fluid('ammonia','pengrob');
P = fluid('carbondioxide','pengrob');
print "SOLVING TRIPLE POINT..."
print "Fluid: %s\nData source: %s" %(P.name, P.source)
try:
p_t, rhof_t, rhog_t = P.triple_point()
except RuntimeError,e:
print "failed to solve triple point"
sys.exit(1)
pmax = 100e6
Tmin = P.T_t
if Tmin == 0:
Tmin = 0.4 * P.T_c
Tmax = 2 * P.T_c
vmin = 1./rhof_t
vmax = 2./rhog_t
TT = linspace(Tmin, Tmax, 100);
vv = logspace(log10(vmin),log10(vmax), 100);
goodT = []
goodv = []
badT = []
badv = []
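# Sweep a grid of (T, v) state points: evaluate (p, h) from (T, rho), then check that
# the (p, h) solver recovers the same state; collect failures for plotting.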
for T in TT:
sys.stderr.write("+++ T = %f\r" % (T))
for v in vv:
rho = 1./v
S = P.set_Trho(T,rho)
p = S.p
if p > pmax:
continue
h = S.h
#print " p = %f bar, h = %f kJ/kg" % (p/1e5,h/1e3)
if(h > 8000e3):
			continue
try:
S = P.set_ph(p,h)
T1 = S.T
rho1 = S.rho
except ValueError,e:
print "ERROR %s at p = %f, h = %f (T = %.12e, rho = %.12e)" % (str(e),p, h,T,rho)
badT.append(T); badv.append(v)
continue
if isnan(T1) or isnan(rho1):
print "ERROR at T1 = %f, rho1 = %f (T = %.12e, rho = %.12
|
e)" % (T1, rho1,T,rho)
badT.append(T); badv.append(v)
else:
goodT.append(T); goodv.append(v)
#print " +++ GOOD RESULT T1 = %f, rho1 = %f" % (T1, rho1)
figure()
print "i \tbad T \tbad v"
for i in range(len(badT)):
print "%d\t%e\t%e" % (i,badT[i], badv[i])
print "TOTAL %d BAD POINTS" % (len(badT))
print "AXIS =",axis()
semilogx(badv, badT, 'rx')
axis([vmin,vmax,Tmin,Tmax])
print "AXIS =",axis()
hold(1)
semilogx(goodv, goodT, 'g.')
# plot saturation curves
TTs = linspace(P.T_t, P.T_c, 300)
TT1 = []
vf1 = []
vg1 = []
for T in TTs:
try:
S = P.set_Tx(T,0)
p = S.p
rhof = S.rho
S = P.set_Tx(T,1)
rhog = S.rho
except:
continue;
TT1.append(T)
vf1.append(1./rhof)
vg1.append(1./rhog)
semilogx(vf1,TT1,"b-")
semilogx(vg1,TT1,"b-")
axis([vmin,vmax,Tmin,Tmax])
title("convergence of (p,h) solver for %s" % P.name)
xlabel("specific volume")
ylabel("temperature")
show()
ion()
|
brahmastra2016/bleachbit
|
tests/TestWindows.py
|
Python
|
gpl-3.0
| 13,915
| 0.000216
|
# vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module Windows
"""
from __future__ import absolute_import, print_function
from tests import common
from bleachbit.FileUtilities import extended_path, extended_path_undo
from bleachbit.Windows import *
from bleachbit import logger, FSE
import sys
import tempfile
import unittest
import platform
from decimal import Decimal
if 'win32' == sys.platform:
import _winreg
from win32com.shell import shell
def put_files_into_recycle_bin():
"""Put a file and a folder into the recycle bin"""
# make a file and move it to the recycle bin
import tempfile
tests = ('regular', u'unicode-emdash-u\u2014', 'long' + 'x' * 100)
for test in tests:
(fd, filename) = tempfile.mkstemp(
prefix='bleachbit-recycle-file', suffix=test)
os.close(fd)
move_to_recycle_bin(filename)
# make a folder and move it to the recycle bin
dirname = tempfile.mkdtemp(prefix='bleachbit-recycle-folder')
common.touch_file(os.path.join(dirname, 'file'))
move_to_recycle_bin(dirname)
@unittest.skipUnless('win32' == sys.platform, 'not running on windows')
class WindowsTestCase(common.BleachbitTestCase):
"""Test case for module Windows"""
def test_get_recycle_bin(self):
"""Unit test for get_recycle_bin"""
for f in get_recycle_bin():
self.assert_(os.path.exists(extended_path(f)), f)
if not common.destructive_tests('get_recycle_bin'):
return
put_files_into_recycle_bin()
# clear recycle bin
counter = 0
for f in get_recycle_bin():
counter += 1
FileUtilities.delete(f)
self.assert_(counter >= 3, 'deleted %d' % counter)
# now it should be empty
for f in get_recycle_bin():
self.fail('recycle bin should be empty, but it is not')
def test_delete_locked_file(self):
"""Unit test for delete_locked_file"""
tests = ('regular', u'unicode-emdash-u\u2014', 'long' + 'x' * 100)
for test in tests:
(fd, pathname) = tempfile.mkstemp(
prefix='bleachbit-delete-locked-file', suffix=test)
os.close(fd)
self.assert_(os.path.exists(pathname))
try:
delete_locked_file(pathname)
except pywintypes.error as e:
if 5 == e.winerror and not shell.IsUserAnAdmin():
pass
else:
raise
self.assert_(os.path.exists(pathname))
logger.info('reboot Windows and check the three files are deleted')
def test_delete_registry_key(self):
"""Unit test for delete_registry_key"""
# (return value, key, really_delete)
tests = ((False, 'HKCU\\Software\\BleachBit\\DoesNotExist', False, ),
(False, 'HKCU\\Software\\BleachBit\\DoesNotExist', True, ),
(True, 'HKCU\\Software\\BleachBit\\DeleteThisKey', False, ),
(True, 'HKCU\\Software\\BleachBit\\DeleteThisKey', True, ), )
# create a nested key
key = 'Software\\BleachBit\\DeleteThisKey'
subkey = key + '\\AndThisKey'
hkey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, subkey)
hkey.Close()
# test
for test in tests:
rc = test[0]
key = test[1]
really_delete = test[2]
return_value = delete_registry_key(key, really_delete)
self.assertEqual(rc, return_value)
if really_delete:
self.assertFalse(detect_registry_key(key))
# Test Unicode key. In BleachBit 0.7.3 this scenario would lead to
# the error (bug 537109)
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position
# 11: ordinal not in range(128)
key = r'Software\\BleachBit\\DeleteThisKey'
hkey = _winreg.CreateKey(
_winreg.HKEY_CURRENT_USER, key + r'\\AndThisKey-Ö')
hkey.Close()
return_value = delete_registry_key(u'HKCU\\' + key, True)
self.assertTrue(return_value)
return_value = delete_registry_key(u'HKCU\\' + key, True)
self.assertFalse(return_value)
def test_delete_registry_value(self):
"""Unit test for delete_registry_value"""
#
# test: value does exist
#
# create a name-value pair
key = 'Software\\BleachBit'
hkey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, key)
value_name = 'delete_this_value_name'
_winreg.SetValueEx(
hkey, value_name, 0, _winreg.REG_SZ, 'delete this value')
hkey.Close()
# delete and confirm
self.assertTrue(
delete_registry_value('HKCU\\' + key, value_name, False))
self.assertTrue(
delete_registry_value('HKCU\\' + key, value_name, True))
self.assertFalse(
delete_registry_value('HKCU\\' + key, value_name, False))
self.assertFalse(
delete_registry_value('HKCU\\' + key, value_name, True))
#
# test: value does not exist
#
self.assertFalse(delete_registry_value(
'HKCU\\' + key, 'doesnotexist', False))
self.assertFalse(delete_registry_value(
'HKCU\\' + key, 'doesnotexist', True))
self.assertFalse(delete_registry_value(
'HKCU\\doesnotexist', value_name, False))
self.assertFalse(delete_registry_value(
'HKCU\\doesnotexist', value_name, True))
def test_detect_registry_key(self):
"""Test for detect_registry_key()"""
self.assert_(detect_registry_key('HKCU\\Software\\Microsoft\\'))
        self.assert_(not detect_registry_key('HKCU\\Software\\DoesNotExist'))
def test_get_autostart_path(self):
"""Unit test for get_autostart_path"""
pathname = get_autostart_path()
dirname = os.path.dirname(pathname)
self.assert_(os.path.exists(dirname),
'startup directory does not exist: %s' % dirname)
def test_get_known_folder_path(self):
"""Unit test for get_known_folder_path"""
version = platform.uname()[3][0:3]
ret = get_known_folder_path('LocalAppDataLow')
self.assertNotEqual(ret, '')
if version <= '6.0':
# Before Vista
self.assertEqual(ret, None)
return
# Vista or later
self.assertNotEqual(ret, None)
self.assert_(os.path.exists(ret))
def test_get_fixed_drives(self):
"""Unit test for get_fixed_drives"""
drives = []
for drive in get_fixed_drives():
drives.append(drive)
self.assertEqual(drive, drive.upper())
self.assert_("C:\\" in drives)
def test_get_windows_version(self):
"""Unit test for get_windows_version"""
v = get_windows_version()
self.assert_(v >= 5.1)
self.assert_(v > 5)
self.assert_(isinstance(v, Decimal))
def test_empty_recycle_bin(self):
"""Unit test for empty_recycle_bin"""
# check the function basically works
for drive in get_fixed_drives():
ret = empty_recycle_bin(drive, really_delete=False)
self.assert_(isinstance(ret, (int, long)))
if not common.destructive_tests('recycle bin'):
return
# check it deletes files for fixed drives
put_files
|
biomodels/MODEL1006230072
|
MODEL1006230072/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1006230072.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
|
acsone/acsone-addons
|
hr_timesheet_no_closed_project_task/models/project_task.py
|
Python
|
agpl-3.0
| 1,843
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of hr_timesheet_no_closed_project_task,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# hr_timesheet_invoice_hide_to_invoice is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# hr_timesheet_invoice_hide_to_invoice is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with hr_timesheet_no_closed_project_task.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
PROJECT_SELECTION = [('template', 'Template'),
('draft', 'New'),
('open', 'In Progress'),
('cancelled', 'Cancelled'),
('pending', 'Pending'),
('close', 'Closed')]
class ProjectTask(models.Model):
_inherit = 'project.task'
stage_closed = fields.Boolean(related='stage_id.closed', string='Closed',
readonly=True)
    project_state = fields.Selection(PROJECT_SELECTION,
related='project_id.state',
string='Project State',
readonly=True)
|
laginha/yard
|
src/yard/resources/decorators/adapted.py
|
Python
|
mit
| 1,471
| 0.004759
|
#!/usr/bin/env python
# encoding: utf-8
from django.contrib.auth.decorators import (
permission_required as original_permission_required,
login_required as original_login_required,)
from keyauth.decorators import key_required as original_key_required
from functools import wraps
class DjangoToYardDecorator(object):
'''
Adapt django's decorators to yard resources
'''
def __init__(self, func):
self.original_decorator = func
def __call__(self, *args, **kwargs):
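        # Build a plain view function around the resource method so the original Django
        # decorator can wrap it, then invoke the wrapped version immediately.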
def decorator(func):
@wraps(func)
def wrapper(klass, request, *rargs, **rkwargs):
def func_wrapper(request, *a, **k):
return func(klass, request, *rargs, **rkwargs)
original_decorator = self.original_decorator(*args, **kwargs)
return original_decorator(func_wrapper)(
request, *rargs, **rkwargs)
return wrapper
return decorator
def login_required(*args, **kwargs):
'''
Check if user is authenticated
'''
return DjangoToYardDecorator( original_login_required )(*args, **kwargs)
def permission_required(*args, **kwargs):
'''
Check if user has permissions
'''
return DjangoToYardDecorator(original_permission_required)(*args, **kwargs)
def key_required(*args, **kwargs):
'''
Check key for access
'''
return DjangoToYardDecorator( original_key_required )(*args, **kwargs)
|
larsks/gitblogger
|
gitblogger/lock.py
|
Python
|
gpl-3.0
| 966
| 0.009317
|
import os
import errno
class LockError (Exception):
pass
class LockIsLocked (LockError):
pass
class LockIsUnlocked (LockError):
pass
class Lock (object):
def __init__ (self, path):
self.path = path
self.locked = False
    def acquire (self):
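        # mkdir() is atomic: if it succeeds we hold the lock, if it raises EEXIST
        # another process already holds it.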
if self.locked:
            raise LockIsLocked()
try:
os.mkdir(self.path)
self.locked = True
except OSError, detail:
if detail.errno == errno.EEXIST:
raise LockIsLocked()
else:
raise
def release (self):
if not self.locked:
            raise LockIsUnlocked()
try:
os.rmdir(self.path)
self.locked = False
except OSError, detail:
if detail.errno == errno.ENOENT:
raise LockIsUnlocked()
else:
raise
def __del__ (self):
if self.locked:
self.release()
|
Juniper/contrail-dev-neutron
|
neutron/plugins/ml2/drivers/mechanism_odl.py
|
Python
|
apache-2.0
| 15,181
| 0
|
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
# @author: Dave Tucker, Hewlett-Packard Development Company L.P.
import time
from oslo.config import cfg
import requests
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
ODL_NETWORK = 'network'
ODL_NETWORKS = 'networks'
ODL_SUBNET = 'subnet'
ODL_SUBNETS = 'subnets'
ODL_PORT = 'port'
ODL_PORTS = 'ports'
not_found_exception_map = {ODL_NETWORKS: n_exc.NetworkNotFound,
ODL_SUBNETS: n_exc.SubnetNotFound,
ODL_PORTS: n_exc.PortNotFound}
odl_opts = [
cfg.StrOpt('url',
help=_("HTTP URL of OpenDaylight REST interface.")),
cfg.StrOpt('username',
help=_("HTTP username for authentication")),
cfg.StrOpt('password', secret=True,
help=_("HTTP password for authentication")),
cfg.IntOpt('timeout', default=10,
help=_("HTTP timeout in seconds.")),
cfg.IntOpt('session_timeout', default=30,
help=_("Tomcat session timeout in minutes.")),
]
cfg.CONF.register_opts(odl_opts, "ml2_odl")
def try_del(d, keys):
"""Ignore key errors when deleting from a dictionary."""
for key in keys:
try:
del d[key]
except KeyError:
pass
class JsessionId(requests.auth.AuthBase):
"""Attaches the JSESSIONID and JSESSIONIDSSO cookies to an HTTP Request.
If the cookies are not available or when the session expires, a new
set of cookies are obtained.
"""
def __init__(self, url, username, password):
"""Initialization function for JsessionId."""
        # NOTE(kmestery) The 'limit' parameter is intended to limit how much
# data is returned from ODL. This is not implemented in the Hydrogen
# release of OpenDaylight, but will be implemented in the Helium
# timeframe. Hydrogen will silently ignore this value.
self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1'
self.username = username
self.password = password
self.auth_cookies = None
self.last_request = None
self.expired = None
self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60
self.session_deadline = 0
def obtain_auth_cookies(self):
"""Make a REST call to obtain cookies for ODL authenticiation."""
r = requests.get(self.url, auth=(self.username, self.password))
r.raise_for_status()
jsessionid = r.cookies.get('JSESSIONID')
jsessionidsso = r.cookies.get('JSESSIONIDSSO')
if jsessionid and jsessionidsso:
self.auth_cookies = dict(JSESSIONID=jsessionid,
JSESSIONIDSSO=jsessionidsso)
def __call__(self, r):
"""Verify timestamp for Tomcat session timeout."""
if time.time() > self.session_deadline:
self.obtain_auth_cookies()
self.session_deadline = time.time() + self.session_timeout
r.prepare_cookies(self.auth_cookies)
return r
class OpenDaylightMechanismDriver(api.MechanismDriver):
"""Mechanism Driver for OpenDaylight.
This driver was a port from the Tail-F NCS MechanismDriver. The API
exposed by ODL is slightly different from the API exposed by NCS,
but the general concepts are the same.
"""
auth = None
out_of_sync = True
def initialize(self):
self.url = cfg.CONF.ml2_odl.url
self.timeout = cfg.CONF.ml2_odl.timeout
self.username = cfg.CONF.ml2_odl.username
self.password = cfg.CONF.ml2_odl.password
self.auth = JsessionId(self.url, self.username, self.password)
self.vif_type = portbindings.VIF_TYPE_OVS
self.vif_details = {portbindings.CAP_PORT_FILTER: True}
# Postcommit hooks are used to trigger synchronization.
def create_network_postcommit(self, context):
self.synchronize('create', ODL_NETWORKS, context)
def update_network_postcommit(self, context):
self.synchronize('update', ODL_NETWORKS, context)
def delete_network_postcommit(self, context):
self.synchronize('delete', ODL_NETWORKS, context)
def create_subnet_postcommit(self, context):
self.synchronize('create', ODL_SUBNETS, context)
def update_subnet_postcommit(self, context):
self.synchronize('update', ODL_SUBNETS, context)
def delete_subnet_postcommit(self, context):
self.synchronize('delete', ODL_SUBNETS, context)
def create_port_postcommit(self, context):
self.synchronize('create', ODL_PORTS, context)
def update_port_postcommit(self, context):
self.synchronize('update', ODL_PORTS, context)
def delete_port_postcommit(self, context):
self.synchronize('delete', ODL_PORTS, context)
def synchronize(self, operation, object_type, context):
"""Synchronize ODL with Neutron following a configuration change."""
if self.out_of_sync:
self.sync_full(context)
else:
self.sync_object(operation, object_type, context)
def filter_create_network_attributes(self, network, context, dbcontext):
"""Filter out network attributes not required for a create."""
try_del(network, ['status', 'subnets'])
def filter_create_subnet_attributes(self, subnet, context, dbcontext):
"""Filter out subnet attributes not required for a create."""
pass
def filter_create_port_attributes(self, port, context, dbcontext):
"""Filter out port attributes not required for a create."""
self.add_security_groups(context, dbcontext, port)
# TODO(kmestery): Converting to uppercase due to ODL bug
# https://bugs.opendaylight.org/show_bug.cgi?id=477
port['mac_address'] = port['mac_address'].upper()
try_del(port, ['status'])
def sync_resources(self, resource_name, collection_name, resources,
context, dbcontext, attr_filter):
"""Sync objects from Neutron over to OpenDaylight.
This will handle syncing networks, subnets, and ports from Neutron to
OpenDaylight. It also filters out the requisite items which are not
valid for create API operations.
"""
to_be_synced = []
for resource in resources:
try:
urlpath = collection_name + '/' + resource['id']
self.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
attr_filter(resource, context, dbcontext)
to_be_synced.append(resource)
key = resource_name if len(to_be_synced) == 1 else collection_name
# 400 errors are returned if an object exists, which we ignore.
self.sendjson('post', collection_name, {key: to_be_synced}, [400])
@utils.synchronized('odl-sync-full')
def sync_full(self, context):
"""Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thead in here at a time.
"""
if not self.out_of_sync:
return
dbcontext = context._plugin_context
networks =
|
lmcro/webserver
|
admin/plugins/error_redir.py
|
Python
|
gpl-2.0
| 4,572
| 0.012905
|
# -*- coding: utf-8 -*-
#
# Cheroke-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import validations
from util import *
from consts import *
URL_APPLY = '/plugin/error_redir/apply'
REDIRECTION_TYPE = [
('0', N_('Internal')),
('1', N_('External'))
]
VALIDATIONS = [
('new_redir', validations.is_path)
]
NOTE_ERROR = N_('HTTP Error to match.')
NOTE_REDIR = N_('Target to access whenever the HTTP Error occurs.')
NOTE_TYPE = N_('Whether the redirection should be Internal or External.')
def commit():
# New entry
key = CTK.post.pop('key')
new_error = CTK.post.pop('new_error')
new_redir = CTK.post.pop('new_redir')
new_type = CTK.post.pop('new_type')
if key and new_error and new_redir and new_type:
CTK.cfg['%s!%s!url' %(key, new_error)] = new_redir
CTK.cfg['%s!%s!show'%(key, new_error)] = new_type
return CTK.cfg_reply_ajax_ok()
# Modification
return CTK.cfg_apply_post()
def sorting_func (x,y):
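    # cmp-style comparator: numeric error codes sort ascending, the 'default' entry always last.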
if x == y == 'default':
return 0
if x == 'default':
return 1
if y == 'default':
return -1
return cmp(int(x), int(y))
class Content (CTK.Container):
def __init__ (self, refreshable, key, url_apply, **kwargs):
CTK.Container.__init__ (self, **kwargs)
# List
entries = CTK.cfg.keys(key)
entries.sort (sorting_func)
if entries:
table = CTK.Table({'id': 'error-redirection'})
table.set_header(1)
table += [CTK.RawHTML(x) for x in ('Error', 'Redirection', 'Type', '')]
for i in entries:
show = CTK.ComboCfg ('%s!%s!show'%(key,i), trans_options(REDIRECTION_TYPE))
redir = CTK.TextCfg ('%s!%s!url'%(key,i), False)
rm = CTK.ImageStock('del')
table += [CTK.RawHTML(i), redir, show, rm]
rm.bind ('click', CTK.JS.Ajax (url_apply,
data = {"%s!%s"%(key,i): ''},
complete = refreshable.JS_to_refresh()))
submit = CTK.Submitter (url_apply)
submit += table
self += submit
# Add new
redir_codes = [('default', _('Default Error'))]
redir_codes += filter (lambda x: not x[0] in entries, ERROR_CODES)
table = CTK.PropsTable()
table.Add (_('Error'), CTK.ComboCfg('new_error', redir_codes, {'class':'noauto'}), _(NOTE_ERROR))
table.Add (_('Redirection'), CTK.TextCfg ('new_redir', False, {'class':'noauto'}), _(NOTE_REDIR))
table.Add (_('Type'), CTK.ComboCfg('new_type', trans_options(REDIRECTION_TYPE), {'class':'noauto'}), _(NOTE_TYPE))
submit = CTK.Submitter(url_apply)
dialog = CTK.Dialog({'title': _('Add New Custom Error'), 'width': 540})
dialog.AddButton (_("Close"), 'close')
dialog.AddButton (_("Add"), submit.JS_to_submit())
submit += table
submit += CTK.HiddenField ({'name': 'key', 'value': key})
submit.bind ('submit_success', refreshable.JS_to_refresh())
submit.bind ('submit_success', dialog.JS_to_close())
dialog += submit
self += dialog
add_new = CTK.Button(_('Add New'))
add_new.bind ('click', dialog.JS_to_show())
self += add_new
class Plugin_error_redir (CTK.Plugin):
def __init__ (self, key, vsrv_num):
CTK.Plugin.__init__ (self, key)
url_apply = '%s/%s' %(URL_APPLY, vsrv_num)
# Content
refresh = CTK.Refreshable ({'id': 'plugin_error'})
refresh.register (lambda: Content(refresh, key, url_apply).Render())
self += CTK.Indenter (refresh)
# Validation, and Public URLs
CTK.publish ('^%s/[\d]+'%(URL_APPLY), commit, method="POST", validation=VALIDATIONS)
|
markgw/pimlico
|
admin/bootstrap.py
|
Python
|
gpl-3.0
| 8,150
| 0.003067
|
"""
Bootstrapping script that create a basic Pimlico setup, either for an existing config file, or for a new project.
Distribute this with your Pimlico project code. You don't need to distribute Pimlico itself
with your project, since it can be downloaded later. Just distribute a directory tree containing your config files,
your own code and this Python script, which will fetch everything else it needs.
Another use is to get a whole new project up and running. Use the `newproject.py` script for that purpose, which
calls this script.
"""
from __future__ import print_function
import os
import sys
from io import open
# Provide simple Py2-3 compatibility without requiring other libraries
PY3 = sys.version_info[0] == 3
if PY3:
    from urllib.request import urlopen
else:
from urllib2 import urlopen
import tarfile
import json
RAW_URL = "https://raw.githubusercontent.com/markgw/pimlico/"
DOWNLOAD_URL = "https://github.com/markgw/pimlico/archive/"
GIT_URL = "https://github.com/markgw/pimlico.git"
GITHUB_API = "https://api.github.com"
def lookup_pimlico_versions():
# Use Github API to find all tagged releases
tag_api_url = "%s/repos/markgw/pimlico/tags" % GITHUB_API
try:
        tag_response = urlopen(tag_api_url).read().decode("utf-8")
except Exception as e:
print("Could not fetch Pimlico release tags from {}: {}".format(tag_api_url, e))
sys.exit(1)
tag_data = json.loads(tag_response)
return [tag["name"] for tag in reversed(tag_data)]
def lookup_bleeding_edge(branch_url):
release_url = "{}admin/release.txt".format(branch_url)
try:
release_data = urlopen(release_url).read().decode("utf-8")
except Exception as e:
print("Could not fetch Pimlico release from {}: {}".format(release_url, e))
sys.exit(1)
return release_data.splitlines()[-1].lstrip("v")
def find_config_value(config_path, key, start_in_pipeline=False):
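    # Scan the [pipeline] section of the config file for the given key, following
    # %% INCLUDE directives into included files.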
with open(config_path, "r", encoding="utf-8") as f:
in_pipeline = start_in_pipeline
for line in f:
line = line.strip("\n ")
if in_pipeline and line:
# Look for the required key in the pipeline section
line_key, __, line_value = line.partition("=")
if line_key.strip() == key:
return line_value.strip()
elif line.startswith("["):
# Section heading
# Start looking for keys if we're in the pipeline section
in_pipeline = line.strip("[]") == "pipeline"
elif line.upper().startswith("%% INCLUDE"):
# Found include directive: follow into the included file
filename = line[10:].strip()
# Get filename relative to current config file
filename = os.path.join(os.path.dirname(config_path), filename)
found_value = find_config_value(filename, key, start_in_pipeline=in_pipeline)
if found_value is not None:
return found_value
# Didn't find the key anywhere
return
def extract(tar_path):
extract_path = os.path.dirname(tar_path)
with tarfile.open(tar_path, "r:gz") as tar:
for item in tar:
tar.extract(item, extract_path)
def tar_dirname(tar_path):
with tarfile.open(tar_path, "r:gz") as tar:
# Expect first member to be a directory
member = tar.next()
if not member.isdir():
raise ValueError("downloaded tar file was expected to contain a directory, but didn't")
return member.name
def symlink(source, link_name):
"""
Symlink creator that works on Windows.
"""
os_symlink = getattr(os, "symlink", None)
if callable(os_symlink):
os_symlink(source, link_name)
else:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
flags = 1 if os.path.isdir(source) else 0
if csl(link_name, source, flags) == 0:
raise ctypes.WinError()
def bootstrap(config_file, git=False):
current_dir = os.path.abspath(os.path.dirname(__file__))
branch_name = git if type(git) is str else "master"
branch_url = "{}{}/".format(RAW_URL, branch_name)
if os.path.exists(os.path.join(current_dir, "pimlico")):
print("Pimlico source directory already exists: delete it if you want to fetch again")
sys.exit(1)
# Check the config file to find the version of Pimlico we need
version = find_config_value(config_file, "release")
if version is None:
print("Could not find Pimlico release in config file %s" % config_file)
sys.exit(1)
major_version = int(version.partition(".")[0])
print("Config file requires Pimlico version {}".format(version))
available_releases = lookup_pimlico_versions()
bleeding_edge = lookup_bleeding_edge(branch_url)
tags = available_releases
# If the bleeding edge version is compatible (same major version) just use that
if int(bleeding_edge.lstrip("v").partition(".")[0]) == major_version:
print("Bleeding edge ({}) is compatible".format(bleeding_edge))
fetch_release = "master"
else:
if git:
print("Error: tried to clone the Git repo instead of fetching a release, but config file is not " \
"compatible with latest Pimlico version")
sys.exit(1)
# Find the latest release that has the same major version
compatible_tags = [t for t in tags if int(t.lstrip("v").partition(".")[0]) == major_version]
fetch_release = compatible_tags[-1]
print("Fetching latest release of major version {}, which is {}".format(major_version, fetch_release))
if git:
# Clone the latest version of the code from the Git repository
# Allow the git kwarg to name a branch to clone
if type(git) is str:
args = "--branch {} ".format(git)
else:
args = ""
print("Cloning git repository ({})".format("{} branch".format(git) if type(git) is str else "master"))
import subprocess
subprocess.check_call("git clone {}{}".format(args, GIT_URL), shell=True)
else:
archive_url = "%s%s.tar.gz" % (DOWNLOAD_URL, fetch_release)
print("Downloading Pimlico source code from {}".format(archive_url))
tar_download_path = os.path.join(current_dir, "archive.tar.gz")
with open(tar_download_path, "wb") as archive_file:
archive_file.write(urlopen(archive_url).read())
print("Extracting source code")
extracted_dirname = tar_dirname(tar_download_path)
extract(tar_download_path)
# Extracted source code: remove the archive
os.remove(tar_download_path)
os.rename(os.path.join(current_dir, extracted_dirname), os.path.join(current_dir, "pimlico"))
print("Pimlico source (%s) is now available in directory pimlico/" % fetch_release)
# Create symlink to pimlico.sh, so it's easier to run
print("Creating symlink pimlico.sh for running Pimlico")
symlink(os.path.join("pimlico", "bin", "pimlico.sh"), "pimlico.sh")
if __name__ == "__main__":
args = sys.argv[1:]
if "--git" in args:
args.remove("--git")
git = True
else:
git = False
if len(args) == 0:
print("Usage:")
print(" python bootstrap.py [--git] <config_file>")
print()
print("Specify a Pimlico config file to set up Pimlico for")
print("If you want to start a new project, with an empty config file, use the newproject.py script")
print()
print("If you specify --git, Pimlico will be cloned as a Git repository, rather ")
print("than downloaded from a release. This only works on Linux and requires that Git is ")
print("installed. Most of the time, you don't want to do this: it's only for Pimlico development")
sys.exit(1)
else:
config_file = os.path.abspath(args[0])
bootstrap(config_file, git=git)
|
mayankcu/Django-social
|
venv/Lib/site-packages/distribute-0.6.28-py2.7.egg/setuptools/command/bdist_egg.py
|
Python
|
bsd-3-clause
| 18,594
| 0.006023
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
from distutils.sysconfig import get_python_version, get_python_lib
except ImportError:
from sysconfig import get_python_version
from distutils.sysconfig import get_python_lib
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
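# For illustration (not part of the original module): write_stub("foo.so", "foo.py")
# would fill the template above and write a stub loader along these lines:
#
#     def __bootstrap__():
#         global __bootstrap__, __loader__, __file__
#         import sys, pkg_resources, imp
#         __file__ = pkg_resources.resource_filename(__name__,'foo.so')
#         __loader__ = None; del __bootstrap__, __loader__
#         imp.load_dynamic(__name__,__file__)
#     __bootstrap__()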
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(get_python_lib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
                os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "
|
mbalasso/mynumpy
|
numpy/core/tests/test_memmap.py
|
Python
|
bsd-3-clause
| 4,069
| 0.002949
|
import sys
from tempfile import NamedTemporaryFile, TemporaryFile, mktemp
import os
from numpy import memmap
from numpy import arange, allclose, asarray
from numpy.testing import *
class TestMemmap(TestCase):
def setUp(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3,4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def tearDown(self):
self.tmpfp.close()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
def test_open_with_filename(self):
tmpname = mktemp('','mmap')
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
os.unlink(tmpname)
def test_unnamed_file(self):
f = TemporaryFile()
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
f.close()
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
self.assertEqual(offset, fp.offset)
self.assertEqual(mode, fp.mode)
del fp
def test_filename(self):
tmpname = mktemp('','mmap')
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = os.path.abspath(tmpname)
fp[:] = self.data[:]
        self.assertEqual(abspath, fp.filename)
b = fp[:1]
self.assertEqual(abspath, b.filename)
del b
del fp
os.unlink(tmpname)
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
                    shape=self.shape)
        self.assertEqual(fp.filename, self.tmpfp.name)
@dec.knownfailureif(sys.platform=='gnu0', "This test is known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
def test_arithmetic_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert tmp._mmap is not fp._mmap
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[[(1, 2), (2, 3)]]
if isinstance(tmp, memmap):
assert tmp._mmap is not fp._mmap
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert fp[:2, :2]._mmap is fp._mmap
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert(new1.base is fp)
assert(new2.base is fp)
new_array = asarray(fp)
assert(new_array.base is fp)
if __name__ == "__main__":
run_module_suite()
|
chris48s/UK-Polling-Stations
|
polling_stations/apps/data_collection/management/commands/import_monmouthshire.py
|
Python
|
bsd-3-clause
| 1,244
| 0.004019
|
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
srid = 27700
council_id = 'W06000021'
districts_name = 'polling_district'
stations_name = 'polling_station.shp'
elections = [
'local.monmouthshire.2017-05-04',
'parl.2017-06-08'
]
def district_record_to_dict(self, record):
return {
'internal_council_id': str(record[1]).strip(),
'name': str(record[1]).strip(),
'polling_station_id': record[3]
}
def station_record_to_dict(self, record):
station = {
'internal_council_id': record[0],
'postcode' : '',
'address' : "%s\n%s" % (record[2].strip(), record[4].strip()),
}
if str(record[1]).strip() == '10033354925':
"""
There is a dodgy point in this file.
It has too many digits for a UK national grid reference.
            Joe queried, Monmouthshire provided this corrected point by email
"""
station['location'] = Point(335973, 206322, srid=27700)
return station
|
abhinavagarwalla/modular_rl
|
modular_rl/core.py
|
Python
|
mit
| 24,088
| 0.008261
|
import numpy as np, time, itertools
from collections import OrderedDict
from .misc_utils import *
from . import distributions
concat = np.concatenate
import theano.tensor as T, theano
from importlib import import_module
import scipy.optimize
from .keras_theano_setup import floatX, FNOPTS
from keras.layers.core import Layer
from .filters import *
from .filtered_env import *
import random
import copy
import opensim as osim
from osim.env import *
# ================================================================
# Make agent
# ================================================================
def get_agent_cls(name):
p, m = name.rsplit('.', 1)
mod = import_module(p)
constructor = getattr(mod, m)
return constructor
# ================================================================
# Stats
# ================================================================
def add_episode_stats(stats, paths):
reward_key = "reward_raw" if "reward_raw" in paths[0] else "reward"
episoderewards = np.array([path[reward_key].sum() for path in paths])
pathlengths = np.array([pathlength(path) for path in paths])
stats["EpisodeRewards"] = episoderewards
stats["EpisodeLengths"] = pathlengths
stats["NumEpBatch"] = len(episoderewards)
stats["EpRewMean"] = episoderewards.mean()
stats["EpRewSEM"] = episoderewards.std()/np.sqrt(len(paths))
stats["EpRewMax"] = episoderewards.max()
stats["EpRewMin"] = episoderewards.min()
stats["EpLenMean"] = pathlengths.mean()
stats["EpLenMax"] = pathlengths.max()
stats["EpLenMin"] = pathlengths.min()
stats["RewPerStep"] = episoderewards.sum()/pathlengths.sum()
def add_prefixed_stats(stats, prefix, d):
for (k,v) in d.items():
stats[prefix+"_"+k] = v
# ================================================================
# Policy Gradients
# ================================================================
def compute_advantage(vf, paths, gamma, lam):
# Compute return, baseline, advantage
for path in paths:
path["return"] = discount(path["reward"], gamma)
b = path["baseline"] = vf.predict(path)
b1 = np.append(b, 0 if path["terminated"] else b[-1])
deltas = path["reward"] + gamma*b1[1:] - b1[:-1]
path["advantage"] = discount(deltas, gamma * lam)
alladv = np.concatenate([path["advantage"] for path in paths])
# Standardize advantage
std = alladv.std()
mean = alladv.mean()
for path in paths:
path["advantage"] = (path["advantage"] - mean) / std
PG_OPTIONS = [
("timestep_limit", int, 0, "maximum length of trajectories"),
("n_iter", int, 200, "number of batch"),
("parallel", int, 0, "collect trajectories in parallel"),
("timesteps_per_batch", int, 10000, ""),
("gamma", float, 0.99, "discount"),
("lam", float, 1.0, "lambda parameter from generalized advantage estimation"),
]
def run_policy_gradient_algorithm(env, agent, usercfg=None, callback=None):
cfg = update_default_config(PG_OPTIONS, usercfg)
cfg.update(usercfg)
print("policy gradient config", cfg)
# if cfg["parallel"]:
# raise NotImplementedError
tstart = time.time()
seed_iter = itertools.count()
for _ in range(cfg["n_iter"]):
# Rollouts ========
paths = get_paths(env, agent, cfg, seed_iter)
paths_subsampled = paths #subsample_paths(paths)
compute_advantage(agent.baseline, paths_subsampled, gamma=cfg["gamma"], lam=cfg["lam"])
# VF Update ========
vf_stats = agent.baseline.fit(paths_subsampled)
# Pol Update ========
pol_stats = agent.updater(paths_subsampled)
# Stats ========
stats = OrderedDict()
add_episode_stats(stats, paths)
add_prefixed_stats(stats, "vf", vf_stats)
add_prefixed_stats(stats, "pol", pol_stats)
stats["TimeElapsed"] = time.time() - tstart
if callback: callback(stats)
def run_policy_gradient_algorithm_hardmining(env, agent, usercfg=None, callback=None, seed_iter=None):
cfg = update_default_config(PG_OPTIONS, usercfg)
cfg.update(usercfg)
print("policy gradient config", cfg)
# if cfg["parallel"]:
# raise NotImplementedError
tstart = time.time()
if seed_iter is None:
seed_iter = itertools.count()
for _ in range(cfg["n_iter"]):
# Rollouts ========
paths = get_paths(env, agent, cfg, seed_iter)
paths_subsampled = paths #subsample_paths(paths)
compute_advantage(agent.baseline, paths_subsampled, gamma=cfg["gamma"], lam=cfg["lam"])
# VF Update ========
vf_stats = agent.baseline.fit(paths_subsampled)
# Pol Update ========
pol_stats = agent.updater(paths_subsampled)
# Stats ========
stats = OrderedDict()
add_episode_stats(stats, paths)
add_prefixed_stats(stats, "vf", vf_stats)
add_prefixed_stats(stats, "pol", pol_stats)
stats["TimeElapsed"] = time.time() - tstart
if callback: callback(stats)
# def subsample_paths(gpaths):
# paths = copy.deepcopy(gpaths)
# for i in range(len(paths)):
# plen = paths[i]['action'].shape[0]
# rno = random.sample(range(plen), 2*plen/3)
# for j in paths[i].keys():
# paths[i][j] = np.delete(paths[i][j], rno, axis=0)
# return paths
def parallel_rollout_worker((agent, ts_limit, ts_batch, iffilter, seed)):
try:
# print("Paralel rollout has been called")
return do_rollouts_serial(agent, ts_limit, ts_batch, iffilter, seed)
except Exception, e:
print("Exception in rollout worker: %s" % e)
import traceback; traceback.print_exc()
raise
def get_paths(env, agent, cfg, seed_iter):
paths = []
if cfg["parallel"]:
start_time = time.time()
from multiprocessing import Pool
# from pathos.multiprocessing import ProcessPool as Pool
num_processes = int(cfg["parallel"])
pool = Pool(processes=num_processes)
# very simple scheme, split work evenly among pool workers (queue would be better)
try:
def callback(result):
print("Length of paths: ", len(result), type(result))
paths.extend([path for paths_list in result for path in paths_list])
args_list = [(agent,
cfg['timestep_limit'],
cfg['timesteps_per_batch'] / num_processes,
cfg['filter'], next(seed_iter)
) for _ in range(num_processes)]
print(args_list)
result = pool.map_async(parallel_rollout_worker, args_list, callback=callback)
# result = pool.map(parallel_rollout_worker, args_list)
result.wait()#1e5)
if not paths:
# print("Paths is still empty")
                # raise Exception
                result.get()
except KeyboardInterrupt:
pool.terminate()
raise
except Exception:
pool.terminate()
raise
else:
pool.close()
        finally:
            pool.join()
print("Time elapsed (%d workers): %.2f" % (num_processes, time.time() - start_time))
else:
paths = do_rollouts_serial(agent, cfg["timestep_limit"], cfg["timesteps_per_batch"], cfg["filter"], next(seed_iter))
return paths
def rollout(env, agent, timestep_limit, seed):
"""
Simulate the env and agent for timestep_limit steps
"""
ob = env._reset(difficulty = 2, seed = seed)
terminated = False
data = defaultdict(list)
for _ in range(timestep_limit):
ob = agent.obfilt(ob)
data["observation"].append(ob)
action, agentinfo = agent.act(ob)
data["action"].append(action)
for (k,v) in agentinfo.items():
data[k].append(v)
ob,rew,done,envinfo = env.step(action)
data["reward"].append(rew)
rew = agent.rewfilt(rew)
for (k,v) in envinfo.items():
data[k].append(v)
if done:
terminated = True
break
data = {k:np.array(v) for (k,v) in data.items()}
|
catapult-project/catapult-csm
|
telemetry/telemetry/value/scalar_unittest.py
|
Python
|
bsd-3-clause
| 7,682
| 0.002343
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry import value
from telemetry.value import improvement_direction
from telemetry.value import none_values
from telemetry.value import scalar
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
name='http://www.bar.com/'))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir,
name='http://www.baz.com/'))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
name='http://www.foo.com/'))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class ValueTest(TestBase):
def testRepr(self):
page0 = self.pages[0]
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
description='desc', tir_label='my_ir',
improvement_direction=improvement_direction.DOWN)
expected = ('ScalarValue(http://www.bar.com/, x, unit, 3, important=True, '
'description=desc, tir_label=my_ir, '
'improvement_direction=down, grouping_keys={}')
self.assertEquals(expected, str(v))
def testBuildbotValueType(self):
page0 = self.pages[0]
    v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=True,
improvement_direction=improvement_direction.DOWN)
self.assertEquals('default', v.GetBuildbotDataType(
value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
self.assertEquals([3], v.GetBuildbotValue())
    self.assertEquals(('x', page0.name),
v.GetChartAndTraceNameForPerPageResult())
v = scalar.ScalarValue(page0, 'x', 'unit', 3, important=False,
improvement_direction=improvement_direction.DOWN)
self.assertEquals(
'unimportant',
v.GetBuildbotDataType(value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
def testScalarSamePageMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page0, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = scalar.ScalarValue(page0, 'x', 'unit', 1,
description='important metric',
improvement_direction=improvement_direction.UP)
v1 = scalar.ScalarValue(page1, 'x', 'unit', 2,
description='important metric',
improvement_direction=improvement_direction.UP)
vM = scalar.ScalarValue.MergeLikeValuesFromDifferentPages([v0, v1])
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals('important metric', vM.description)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
self.assertEquals(improvement_direction.UP, vM.improvement_direction)
def testScalarWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = scalar.ScalarValue(
page0, 'x', 'unit', 1, improvement_direction=improvement_direction.DOWN)
v1 = scalar.ScalarValue(page0, 'x', 'unit', None, none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
self.assertTrue(v1.IsMergableWith(v0))
vM = scalar.ScalarValue.MergeLikeValuesFromSamePage([v0, v1])
self.assertEquals(None, vM.values)
expected_none_value_reason = (
'Merging values containing a None value results in a None value. '
'None values: [ScalarValue(http://www.bar.com/, x, unit, None, '
'important=True, description=None, tir_label=None, '
'improvement_direction=down, grouping_keys={}]')
self.assertEquals(expected_none_value_reason, vM.none_value_reason)
def testScalarWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', None,
improvement_direction=improvement_direction.UP))
def testScalarWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: scalar.ScalarValue(
page0, 'x', 'unit', 1, none_value_reason='n',
improvement_direction=improvement_direction.UP))
def testAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', 42, important=False,
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': 42})
def testNoneValueAsDict(self):
v = scalar.ScalarValue(None, 'x', 'unit', None, important=False,
none_value_reason='n',
improvement_direction=improvement_direction.DOWN)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {'value': None, 'none_value_reason': 'n'})
def testFromDictInt(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
'improvement_direction': improvement_direction.DOWN,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42)
self.assertEquals(v.improvement_direction, improvement_direction.DOWN)
def testFromDictFloat(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42.4,
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, 42.4)
def testFromDictWithoutImprovementDirection(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': 42,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertIsNone(v.improvement_direction)
def testFromDictNoneValue(self):
d = {
'type': 'scalar',
'name': 'x',
'units': 'unit',
'value': None,
'none_value_reason': 'n',
'improvement_direction': improvement_direction.UP,
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, scalar.ScalarValue))
self.assertEquals(v.value, None)
self.assertEquals(v.none_value_reason, 'n')
|
epaglier/Project-JARVIS
|
jarvis-features/Weather AI/weatherai.py
|
Python
|
gpl-3.0
| 2,382
| 0.011755
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import numpy as np
def predictClothesGeneral(temp):
dataFile = open("data.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
Y2 = []
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(int(data[i].split(":")[3]))
Y2.append(int(data[i].split(":")[4]))
clf = RandomForestClassifier(n_estimators=25)
clf2 = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
clf2.fit(X,Y2)
pants = clf.predict([[temp]])
tops = clf2.predict([[temp]])
s = "I recommend you wear a pair of "
if pants == 1:
s = s + "jeans"
else:
s = s + "khaki shorts"
s = s + " and a "
if tops == 1:
s = s + "shirt, its a nice day out!"
elif tops == 2:
s = s + "sweat shirt."
else:
s = s + "jacket, it will be chilly today."
return s
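# Illustration (not part of the original file): the classifiers above assume a
# colon-separated data.txt in which field 1 is the temperature and fields 3 and 4
# are integer labels for pants and tops; a hypothetical line could look like:
#
#     2017-03-01:72.0:sunny:2:1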
def predictFromFileGeneral(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
data2 = data[i].split(":")
print "At " + data2[1].split(",")[0] + " degrees... " + predictClothesGeneral(float(data2[1].split(",")[0]))
def addToKnownList(shirt, temp):
dataFile = open("userAdded.txt", 'a')
dataFile.write(str(shirt + ":" + str(temp)) + '\n')
def predictClothesData(temp):
dataFile = open("userAdded.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(data[i].split(":")[0])
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
predict = clf.predict([[temp]])
return predict
def predictFromFileData(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
        data2 = data[i].split(":")
        print "At " + data2[1].split(",")[0] + " degrees... I would recommend a " + predictClothesData(float(data2[1].split(",")[0]))[0]
|
Z2PackDev/bands_inspect
|
bands_inspect/lattice.py
|
Python
|
apache-2.0
| 1,413
| 0
|
# -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Defines a crystal lattice class.
"""
import numpy as np
import scipy.linalg as la
from fsc.export import export
# TODO: move to a separate module # pylint: disable=fixme,useless-suppression
@export
class Lattice:
"""
Defines a periodic lattice.
"""
def __init__(self, matrix):
self.matrix = np.array(matrix)
def __array__(self):
return self.matrix
@property
def reciprocal_lattice(self):
return type(self)(matrix=2 * np.pi * la.inv(self.matrix).T)
def get_cartesian_coords(self, fractional_coords):
return np.dot(fractional_coords, self.matrix)
def get_fractional_coords(self, cartesian_coords):
return la.solve(self.matrix.T, np.array(cartesian_coords).T).T
def get_cartesian_distance(self, fractional_coord_1, fractional_coord_2):
return la.norm(
self.get_cartesian_coords(fractional_coord_1) -
self.get_cartesian_coords(fractional_coord_2)
)
def get_reciprocal_cartesian_distance( # pylint: disable=invalid-name
self, reciprocal_fractional_coord_1, reciprocal_fractional_coord_2
):
return self.reciprocal_lattice.get_cartesian_distance(
reciprocal_fractional_coord_1, reciprocal_fractional_coord_2
)
|
ericholscher/django
|
django/contrib/gis/db/backends/base.py
|
Python
|
bsd-3-clause
| 11,584
| 0.001468
|
"""
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model used by the backend.
"""
import re
from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class BaseSpatialOperations(object):
"""
    This module holds the base `BaseSpatialBackend` object, which is
instantiated by each spatial database backend with the features
it has.
"""
distance_functions = {}
    geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = set()
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a get_geom_placeholder() method')
def get_expression_column(self, evaluator):
"""
Helper method to return the quoted column string from the evaluator
for its expression.
"""
for expr, col_tup in evaluator.cols:
if expr is evaluator.expression:
return '%s.%s' % tuple(map(self.quote_name, col_tup))
raise Exception("Could not find the column for the expression.")
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_lookup_sql() method')
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geometry_columns() method')
def spatial_ref_sys(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_ref_sys() method')
@python_2_unicode_compatible
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
try:
self._srs = gdal.SpatialReference(self.wkt)
return self.srs
except Exception as msg:
pass
try:
self._srs = gdal.SpatialReference(self.proj4text)
return self.srs
except Exception as msg:
pass
raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m:
return (float(m.group('major')), float(m.group('flattening')))
else:
return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDA
|
abawchen/leetcode
|
solutions/019_remove_nth_node_from_end_of_list.py
|
Python
|
mit
| 1,751
| 0.003427
|
# Given a linked list, remove the nth node from the end of list and return its head.
# For example,
# Given linked list: 1->2->3->4->5, and n = 2.
# After removing the second node from the end, the linked list becomes 1->2->3->5.
# Note:
# Given n will always be valid.
# Try to do this in one pass.
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    # @param {ListNode} head
# @param {integer} n
# @return {ListNode}
def removeNthFromEnd(self, head, n):
# no extra space
fast = slow = head
for _ in range(n):
fast = fast.next
        if not fast:
return head.next
while fast.next:
fast, slow = fast.next, slow.next
slow.next = slow.next.next
return head
# extra space O(n)
# if not head:
# return head
# i = 0
# dic = {}
# node = head;
# while node:
# dic[i] = node
# node = node.next
# i += 1
# p = i - n
# if p is 0:
# head = head.next
# else:
# dic[p-1].next = dic.get(p+1, None)
# return head
def printList(self, head):
print "==========="
cur = head
while cur:
print cur.val
cur = cur.next
print "==========="
s = Solution()
n0 = ListNode(1)
n1 = ListNode(2)
n2 = ListNode(3)
n3 = ListNode(4)
n4 = ListNode(5)
# n5 = ListNode(6)
# n6 = ListNode(7)
n0.next = n1
n1.next = n2
n2.next = n3
n3.next = n4
# n4.next = n5
# n5.next = n6
# n0 = s.removeNthFromEnd(n0, 3)
# n0 = s.removeNthFromEnd(None, 0)
s.printList(n0)
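# Illustration (not from the original file): applying the two-pointer solution to
# the list built above with n = 2 yields 1->2->3->5, as in the problem statement:
#
#     n0 = s.removeNthFromEnd(n0, 2)
#     s.printList(n0)   # prints 1 2 3 5 between the separator lines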
|
detiber/lib_openshift
|
lib_openshift/models/v1_image_source.py
|
Python
|
apache-2.0
| 5,181
| 0.001544
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ImageSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'_from': 'V1ObjectReference',
'paths': 'list[V1ImageSourcePath]',
'pull_secret': 'V1LocalObjectReference'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'_from': 'from',
'paths': 'paths',
'pull_secret': 'pullSecret'
}
def __init__(self, _from=None, paths=None, pull_secret=None):
"""
V1ImageSource - a model defined in Swagger
"""
self.__from = _from
self._paths = paths
self._pull_secret = pull_secret
@property
def _from(self):
"""
Gets the _from of this V1ImageSource.
From is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.
:return: The _from of this V1ImageSource.
:rtype: V1ObjectReference
"""
return self.__from
@_from.setter
def _from(self, _from):
"""
Sets the _from of this V1ImageSource.
From is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.
:param _from: The _from of this V1ImageSource.
:type: V1ObjectReference
"""
self.__from = _from
@property
def paths(self):
"""
Gets the paths of this V1ImageSource.
Paths is a list of source and destination paths to copy from the image.
:return: The paths of this V1ImageSource.
:rtype: list[V1ImageSourcePath]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""
Sets the paths of this V1ImageSource.
Paths is a list of source and destination paths to copy from the image.
:param paths: The paths of this V1ImageSource.
:type: list[V1ImageSourcePath]
"""
self._paths = paths
@property
def pull_secret(self):
"""
Gets the pull_secret of this V1ImageSource.
        PullSecret is a reference to a secret to be used to pull the image from a registry. If the image is pulled from the OpenShift registry, this field does not need to be set.
:return: The pull_secret of this V1ImageSource.
:rtype: V1LocalObjectReference
"""
return self._pull_secret
@pull_secret.setter
def pull_secret(self, pull_secret):
"""
Sets the pull_secret of this V1ImageSource.
        PullSecret is a reference to a secret to be used to pull the image from a registry. If the image is pulled from the OpenShift registry, this field does not need to be set.
:param pull_secret: The pull_secret of this V1ImageSource.
:type: V1LocalObjectReference
"""
self._pull_secret = pull_secret
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1ImageSource.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
                ))
            elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
            elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
gencer/python-phonenumbers
|
python/phonenumbers/shortdata/region_NZ.py
|
Python
|
apache-2.0
| 673
| 0.008915
|
"""Auto-generated file, do not edit by hand. NZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NZ = PhoneMetadata(id='NZ', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='[14]\\d{2,3}', possible_length=(3, 4)),
emergency=PhoneNumberDesc(national_number_pattern='111', example_number='111', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='111|4098', example_number='111', possible_length=(3, 4)),
sms_services=PhoneNumberDesc(national_number_pattern='4098', example_number='4098', possible_length=(4,)),
    short_data=True)
|
szymanskirafal/ab
|
zadania/urls.py
|
Python
|
mit
| 388
| 0.010309
|
from django.conf.urls import url
from .views import ZadanieCreateView, ZadanieDetailView, ZadanieUpdateView
urlpatterns = [
    url(r'^dodaj/(?P<dopuszczenie_id>[0-9]+)/$', ZadanieCreateView.as_view(), name='create'),
    url(r'(?P<pk>[0-9]+)/detail/$', ZadanieDetailView.as_view(), name='detail'),
url(r'(?P<pk>[0-9]+)/update/$', ZadanieUpdateView.as_view(), name='update'),
]
|
vaniakosmos/memes-reposter
|
server.py
|
Python
|
mit
| 432
| 0
|
import multiprocessing
import os
from datetime import timedelta
import easy_env
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())
PORT = os.environ.get('PORT', '8000')
WEB_WORKERS = easy_env.get_int('WEB_WORKERS', multiprocessing.cpu_count())
bind = ":" + PORT
workers = WEB_WORKERS
timeout = timedelta(minutes=30).seconds
accesslog = '-'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s'
|
tonygalmiche/ove_structure
|
is_api.py
|
Python
|
mit
| 13,118
| 0.008564
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import time
from openerp import pooler
from openerp.osv import fields, osv
from openerp.tools.translate import _
class is_api(osv.osv):
_name = 'is_api'
_description = u'Fonctions générales'
def get_usagers_structure(self, cr, uid, structure_id, context=None):
""" Retourner la liste des usagers appartenants à la structure passée en paramètre
"""
usager_line_obj = self.pool.get('ove.usager.structure')
line_ids = usager_line_obj.search(cr, uid, [('structure_id','=',structure_id)], context=context)
print 'line_ids *****', line_ids
usagers = list(set([line['usager_id'][0] for line in usager_line_obj.read(cr, uid, line_ids, ['usager_id'], context=context)]))
return usagers
def get_usager_groups(self, cr, uid, usager_id, context=None):
""" Retourner les groupes associés à l'usager passé en paramètre
"""
group_obj = self.pool.get('ove.groupe')
group_ids = group_obj.search(cr, uid, [('usager_id','=', usager_id)], context=context)
groups = []
for group in group_obj.read(cr, uid, group_ids, ['id', 'code'], context=context):
groups.append({'id':group['id'], 'code':group['code']})
newlist = sorted(groups, key=lambda k: k['code'])
return newlist
def get_users_usager(self, cr, uid, structure_lines, context=None):
""" Retourner les utilisateurs liés aux groupes de l'usager à partir des structures qui leurs appartient
"""
users = {'group_1':[], 'group_2':[], 'group_3':[], 'group_4':[], 'group_5':[],
'group_6':[], 'group_7':[], 'group_8':[], 'group_9':[], 'group_10':[]
}
if not structure_lines:
return users
for line in structure_lines:
if line.structure_id.users_line:
for user_line in line.structure_id.users_line:
if user_line.group_1 and line.group_1:
users['group_1'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_2 and line.group_2:
users['group_2'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_3 and line.group_3:
users['group_3'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_4 and line.group_4:
users['group_4'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_5 and line.group_5:
users['group_5'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_6 and line.group_6:
users['group_6'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_7 and line.group_7:
users['group_7'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_8 and line.group_8:
users['group_8'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
if user_line.group_9 and line.group_9:
users['group_9'].append(user_line.user_id.id)
users['group_10'].append(user_line.user_id.id)
""" Eliminer les doublons des listes """
users.update({'group_1': list(set(users['group_1']))})
users.update({'group_2': list(set(users['group_2']))})
users.update({'group_3': list(set(users['group_3']))})
users.update({'group_4': list(set(users['group_4']))})
users.update({'group_5': list(set(users['group_5']))})
users.update({'group_6': list(set(users['group_6']))})
users.update({'group_7': list(set(users['group_7']))})
users.update({'group_8': list(set(users['group_8']))})
users.update({'group_9': list(set(users['group_9']))})
users.update({'group_10': list(set(users['group_10']))})
return users
def create_group(self, cr, uid, code_groupe, prefix, name_group, users, usager_id, context=None):
""" Création d'un groupe OVE
"""
vals = {
'code': code_groupe,
'name': prefix + ' - ' + name_group,
'user_ids': [[6, 0, users]],
'usager_id': usager_id,
}
return self.pool.get('ove.groupe').create(cr, uid, vals, context=context)
def associate_groupe_usager(self, cr, uid, usager_id, group_id, group_usager, context=None):
""" Associer un groupe au groupe correspondant de l'usager
"""
usager_obj = self.pool.get('is.usager')
if group_usager == 'G1':
usager_obj.write(cr, uid, usager_id, {'group_1': group_id}, context=context)
if group_usager == 'G2':
usager_obj.write(cr, uid, usager_id, {'group_2': group_id}, context=context)
        if group_usager == 'G3':
            usager_obj.write(cr, uid, usager_id, {'group_3': group_id}, context=context)
if group_usager == 'G4':
usager_obj.write(cr, uid, usager_id, {'group_4': group_id}, context=context)
        if group_usager == 'G5':
usager_obj.write(cr, uid, usager_id, {'group_5': group_id}, context=context)
if group_usager == 'G6':
usager_obj.write(cr, uid, usager_id, {'group_6': group_id}, context=context)
if group_usager == 'G7':
usager_obj.write(cr, uid, usager_id, {'group_7': group_id}, context=context)
if group_usager == 'G8':
usager_obj.write(cr, uid, usager_id, {'group_8': group_id}, context=context)
if group_usager == 'G9':
usager_obj.write(cr, uid, usager_id, {'group_9': group_id}, context=context)
if group_usager == 'G10':
usager_obj.write(cr, uid, usager_id, {'group_10': group_id}, context=context)
return True
def create_ove_groups(self, cr, uid, prefix, users, usager_id, context=None):
""" Création de l'ensemble des groupes pour chaque usager ou structure
"""
group_id = self.create_group(cr, uid, 'G1', prefix, 'Groupe Impression', users['group_1'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G1', context)
group_id = self.create_group(cr, uid, 'G2', prefix, 'Groupe Donnée Administrative', users['group_2'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G2', context)
group_id = self.create_group(cr, uid, 'G3', prefix, 'Groupe Donnée Administrative Modification', users['group_3'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G3', context)
group_id = self.create_group(cr, uid, 'G4', prefix, 'Groupe Donnée Institutionnelle', users['group_4'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G4', context)
group_id = self.create_group(cr, uid, 'G5', prefix, 'Groupe Donnée Institutionnelle Modification', users['group_5'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G5', context)
group_id = self.create_group(cr, uid, 'G6', prefix, 'Groupe Donnée Institutionnelle Validation', users['group_6'], usager_id, context=context)
self.associate_groupe_usager(cr, uid, usager_id, group_id, 'G6', context)
group_id = self.create_group(cr, uid, 'G7', prefix, 'Groupe Donnée métier', users['group_7'], usa
|
bbradbury/yum-utils
|
debuginfo-install.py
|
Python
|
gpl-2.0
| 7,411
| 0.004723
|
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Copyright 2007 Seth Vidal
import sys
import os
sys.path.insert(0,'/usr/share/yum-cli/')
import yum
import yum.Errors
from utils import YumUtilBase
from yum import _
import logging
import rpmUtils
plugin_autodebuginfo_package_name = "yum-plugin-auto-update-debug-info"
class DebugInfoInstall(YumUtilBase):
NAME = 'debuginfo-install'
VERSION = '1.0'
USAGE = """
debuginfo-install: Install debuginfo packages and their dependencies based on
the name of the non-debug package
debuginfo-install [options] package1 [package2] [package..]"""
def __init__(self):
        YumUtilBase.__init__(self,
                             DebugInfoInstall.NAME,
DebugInfoInstall.VERSION,
DebugInfoInstall.USAGE)
self.logger = logging.getLogger("yum.verbose.cli.debuginfoinstall")
self.optparser = self.getOptionParser()
opts = self.optparser
# Add util commandline options to the yum-cli ones
if hasattr(self, 'getOptionGroup'):
opts = self.getOptionGroup()
opts.add_option("", "--no-debuginfo-plugin",
action="store_true",
help="Turn off automatic installation/update of the yum debuginfo plugin")
self.main()
def doUtilConfigSetup(self, *args, **kwargs):
""" We override this to get our extra option out. """
opts = YumUtilBase.doUtilConfigSetup(self, *args, **kwargs)
self.no_debuginfo_plugin = opts.no_debuginfo_plugin
return opts
def main(self):
# Parse the commandline option and setup the basics.
opts = self.doUtilConfigSetup()
# Check if there is anything to do.
if len(self.cmds) < 1:
print self.optparser.format_help()
sys.exit(0)
if os.geteuid() != 0:
print >> sys.stderr, "You must be root to run this command."
sys.exit(1)
try:
self.doLock()
except yum.Errors.LockError, e:
self.logger.critical("Another application is holding the yum lock, cannot continue")
sys.exit(1)
# enable the -debuginfo repos for enabled primary repos
repos = {}
for repo in self.repos.listEnabled():
repos[repo.id] = repo
for repoid in repos:
di = '%s-debuginfo' % repoid
if di in repos:
continue
repo = repos[repoid]
for r in self.repos.findRepos(di):
self.logger.log(yum.logginglevels.INFO_2,
_('enabling %s') % r.id)
r.enable()
# Note: This is shared with auto-update-debuginfo
for opt in ['repo_gpgcheck', 'gpgcheck', 'cost',
'skip_if_unavailable']:
if hasattr(r, opt):
setattr(r, opt, getattr(repo, opt))
# Setup yum (Ts, RPM db, Repo & Sack)
self.doUtilYumSetup()
self.debugInfo_main()
if hasattr(self, 'doUtilBuildTransaction'):
errc = self.doUtilBuildTransaction()
if errc:
sys.exit(errc)
else:
try:
self.buildTransaction()
except yum.Errors.YumBaseError, e:
self.logger.critical("Error building transaction: %s" % e)
sys.exit(1)
if len(self.tsInfo) < 1:
print 'No debuginfo packages available to install'
self.doUnlock()
sys.exit()
sys.exit(self.doUtilTransaction())
def di_try_install(self, po):
if po.name.endswith('-debuginfo'): # Wildcard matches produce this
return
di_name = '%s-debuginfo' % po.name
if self.pkgSack.searchNevra(name=di_name, arch=po.arch):
test_name = di_name
ver, rel = po.version, po.release
else:
srpm_data = rpmUtils.miscutils.splitFilename(po.sourcerpm) # take the srpmname
srpm_name, ver, rel = srpm_data[0], srpm_data[1], srpm_data[2]
test_name = '%s-debuginfo' % srpm_name
self.install(name=test_name, arch=po.arch, version=ver, release=rel)
def debugInfo_main(self):
"""for each package specified, walk the package's list of deps and install
all the -debuginfo pkgs that match it and its debuginfo"""
# for each pkg
# add that debuginfo to the ts
# look through that pkgs' deps
# add all the debuginfos for the pkgs providing those deps
for pkgglob in self.cmds:
e, m, u = self.rpmdb.matchPackageNames([pkgglob])
for po in e + m:
try:
self.di_try_install(po)
except yum.Errors.InstallError, e:
self.logger.critical('Could not find debuginfo for main pkg: %s' % po)
# do each of its deps
for (n,f,v) in po.requires:
if n.startswith('rpmlib'):
continue
if n.find('.so') != -1:
for pkgtup in self.rpmdb.whatProvides(n,f,v):
deppo = self.rpmdb.searchPkgTuple(pkgtup)[0]
try:
self.di_try_install(deppo)
except yum.Errors.InstallError, e:
self.logger.critical('Could not find debuginfo pkg for dependency package %s' % deppo)
# This is kinda hacky, accessing the option from the plugins code
# but I'm not sure of a better way of doing it
if not self.no_debuginfo_plugin and self.tsInfo:
try:
self.install(pattern=plugin_autodebuginfo_package_name)
except yum.Errors.InstallError, e:
self.logger.critical('Could not find auto debuginfo plugin')
if __name__ == '__main__':
import locale
# This test needs to be before locale.getpreferredencoding() as that
# does setlocale(LC_CTYPE, "")
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error, ex:
# default to C locale if we get a failure.
print >> sys.stderr, 'Failed to set locale, defaulting to C'
os.environ['LC_ALL'] = 'C'
locale.setlocale(locale.LC_ALL, 'C')
if True: # not sys.stdout.isatty():
import codecs
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
sys.stdout.errors = 'replace'
util = DebugInfoInstall()
|
gdikos/qstk-on-ec2
|
market_sim.py
|
Python
|
mit
| 5,585
| 0.005909
|
import datetime as dt
import numpy as np
import pandas as pd
# QSTK Imports
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.qsdateutil as du
def get_orders_list(s_file_path):
l_columns = ["year", "month", "day", "sym", "type", "num"]
df_orders_list = pd.read_csv(s_file_path, sep=',', header=None)
df_orders_list = df_orders_list.dropna(axis=1, how='all')
df_orders_list.columns = l_columns
return df_orders_list
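# A minimal sketch of the expected orders file, inferred from l_columns above
# (values are illustrative): comma-separated, no header row, one order per
# line, e.g.
#   2011,1,10,AAPL,Buy,1500
#   2011,1,13,AAPL,Sell,1500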
def get_orders(df_orders_list):
na_orders_list = df_orders_list.values
l_orders = []
ld_daily_orders = None
for order in na_orders_list:
dt_date = dt.datetime(order[0], order[1], order[2], hour=16)
d_order = {df_orders_list.columns[3]: order[3], \
df_orders_list.columns[4]: order[4], \
df_orders_list.columns[5]: int(order[5])}
if l_orders != [] and dt_date == l_orders[-1][0]:
l_orders[-1][1].append(d_order)
else:
ld_daily_orders = []
ld_daily_orders.append(d_order)
l_orders.append([dt_date, ld_daily_orders])
na_orders = np.array(l_orders)
df_orders = pd.DataFrame(na_orders[:, 1], index=na_orders[:, 0], columns=["ord"])
df_orders = df_orders.sort()
dt_start = df_orders.ix[0].name
dt_end = df_orders.ix[-1].name
ls_symbols = list(set(df_orders_list["sym"]))
    ls_symbols.sort() # It is necessary to sort due to the use of set
return df_orders, dt_start, dt_end, ls_symbols
def get_data(dt_start, dt_end, ls_symbols):
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
ls_keys = ["open", "high", "low", "close", "volume", "actual_close"]
dataobj = da.DataAccess('Yahoo')
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method="ffill")
d_data[s_key] = d_data[s_key].fillna(method="bfill")
d_data[s_key] = d_data[s_key].fillna(1.0)
return d_data
def get_prices(dt_start, dt_end, ls_symbols, s_key="close"):
# close = adjusted close
# actual_close = actual close
d_data = get_data(dt_start, dt_end, ls_symbols)
return d_data[s_key]
def process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res):
op = 0
daily_orders = list(df_orders.ix[dt_date, "ord"])
for order in daily_orders:
if order["type"] == "Buy":
op = 1
elif order["type"] == "Sell":
op = -1
df_num.ix[dt_date, order["sym"]] += op * order["num"]
df_res.ix[dt_date, "cash"] += -op * order["num"] * df_prices.ix[dt_date, order["sym"]]
def update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res):
for s_symbol in ls_symbols:
df_num.ix[dt_date, s_symbol] = df_num.ix[dt_last_orders_date, s_symbol]
df_res.ix[dt_date, "cash"] = df_res.ix[dt_last_orders_date, "cash"]
def value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res):
for s_symbol in ls_symbols:
df_val.ix[dt_date, s_symbol] = df_num.ix[dt_date, s_symbol] * df_prices.ix[dt_date, s_symbol]
df_res.ix[dt_date, "port"] = np.sum(df_val.ix[dt_date, :])
df_res.ix[dt_date, "total"] = df_res.ix[dt_date, "port"] + df_res.ix[dt_date, "cash"]
def process_orders(df_orders, df_prices, cash):
ldt_dates = list(df_prices.index)
ls_symbols = list(df_prices.columns)
df_num = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_val = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_res = pd.DataFrame(index=ldt_dates, columns=["port", "cash", "total"])
df_num = df_num.fillna(0.0)
df_val = df_val.fillna(0.0)
df_res = df_res.fillna(0.0)
df_res.ix[0, "cash"] = cash
ldt_orders_dates = list(df_orders.index)
iter_orders_dates = iter(ldt_orders_dates)
dt_orders_date = iter_orders_dates.next()
dt_last_orders_date = dt_orders_date
for dt_date in ldt_dates:
update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res)
if dt_date == dt_orders_date:
process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res)
try:
dt_last_orders_date = dt_orders_date
dt_orders_date = iter_orders_dates.next()
except StopIteration:
pass
value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res)
df_port = df_num.join(df_val, lsuffix="_num", rsuffix="_val").join(df_res)
#df_port.to_csv("port.csv")
return df_port
def save_values(df_port, s_out_file_path):
ldt_dates = df_port.index
na_dates = np.array([[dt_date.year, dt_date.month, dt_date.day] for dt_date in ldt_dates])
na_total = np.array(df_port["total"])
na_values = np.insert(arr=na_dates, obj=3, values=na_total, axis=1)
df_values = pd.DataFrame(na_values, columns=["year", "month", "day", "total"])
df_values.to_csv(s_out_file_path, sep=",", header=False, index=False)
if __name__ == '__main__':
print "start market_sim.py"
|
s_in_file_path = "data\\q1_orders.csv"
s_out_file_path = "data\\q1_values.csv"
s_cash = "100000"
f_cash = float(s_cash)
|
df_orders_list = get_orders_list(s_in_file_path)
df_orders, dt_start, dt_end, ls_symbols = get_orders(df_orders_list)
df_prices = get_prices(dt_start, dt_end, ls_symbols)
df_port = process_orders(df_orders, df_prices, f_cash)
save_values(df_port, s_out_file_path)
print "end market_sim.py"
|
callowayproject/django-massmedia
|
massmedia/admin.py
|
Python
|
apache-2.0
| 10,650
| 0.001784
|
from django.contrib import admin
from django.contrib.admin.widgets import AdminFileWidget, AdminURLFieldWidget
from django.contrib.contenttypes.models import ContentType
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from models import (Image, Video, Audio, Flash, Collection, Embed, Document,
CollectionRelation, MediaTemplate)
import settings
from forms import (ImageCreationForm, VideoCreationForm, AudioCreationForm,
FlashCreationForm, DocumentCreationForm, EmbedCreationForm)
# from templatetags.media_widgets import snipshot_url
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, 'instance') and value.instance.thumbnail:
thumbnail = value.instance.thumbnail.url
width = value.instance.thumb_width
height = value.instance.thumb_height
# snipshot = snipshot_url(value.instance)
# crop_tag = '''<br /><a class="link" href="#" onclick="var win = window.open('%s','snipshot', 'height=500,width=800,resizable=yes,scrollbars=yes');win.focus();">Crop image with snipshot</a>''' % snipshot
tag = u'<img src="%s" width="%s" height="%s"/>' % (
thumbnail, width, height)
else:
# crop_tag = u""
tag = _("<strong>No Thumbnail available</strong>")
if value:
output.append(u'<a href="%s" target="_blank">%s</a>' % (
value.url, tag))
# output.append(crop_tag)
return mark_safe(u''.join(output))
class AdminExternalURLWidget(AdminURLFieldWidget):
def render(self, name, value, attrs=None):
output = []
tag = _("<strong>No Thumbnail available</strong>")
if value:
output.append(u'<a href="%s" target="_blank">%s</a>' % (value, tag))
output.append(u'<br /><a href="%s" target="_blank">%s</a>' % (value, value))
return mark_safe(u''.join(output))
class GenericCollectionInlineModelAdmin(admin.options.InlineModelAdmin):
ct_field = 'content_type'
ct_fk_field = 'object_id'
fields = ('content_type', 'object_id', 'position')
extra = 3
def __init__(self, parent_model, admin_site):
super(GenericCollectionInlineModelAdmin, self).__init__(parent_model, admin_site)
ctypes = ContentType.objects.all().order_by('id').values_list('id', 'app_label', 'model')
elements = ["%s: '%s/%s'" % (x, y, z) for x, y, z in ctypes]
self.content_types = "{%s}" % ",".join(elements)
def get_formset(self, request, obj=None):
result = super(GenericCollectionInlineModelAdmin, self).get_formset(request, obj)
result.content_types = self.content_types
result.ct_fk_field = self.ct_fk_field
return result
class GenericCollectionTabularInline(GenericCollectionInlineModelAdmin):
template = 'admin/edit_inlines/gen_coll_tabular.html'
class MediaAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('title', 'caption')}),
(_("Content"), {'fields': (('file', 'external_url'),)}),
(_("Credit"), {'fields': ('author', 'one_off_author', 'reproduction_allowed')}),
(_("Metadata"), {'fields': ('metadata', 'mime_type')}),
(_("Connections"), {'fields': ('public', 'site')}),
# (_("Widget"), {'fields': ('width', 'height')}),
(_("Advanced options"), {
'classes': ('collapse',),
'fields': ('widget_template',)
}),
)
add_fieldsets = (
(None, {'fields': ('title',)}),
(_("Content"), {'fields': ('external_url', 'file', 'caption')}),
(_("Rights"), {'fields': ('public', 'reproduction_allowed')}),
(_("Additional Info"), {
'classes': ('collapse',),
'fields': ('creation_date', 'site')
})
)
list_display = ('title', 'author_name', 'mime_type', 'public', 'creation_date')
list_filter = ('site', 'creation_date', 'public')
list_editable = ('public',)
date_hierarchy = 'creation_date'
search_fields = ('caption', 'file')
raw_id_fields = ('author', )
add_form_template = 'admin/massmedia/content_add_form.html'
def get_fieldsets(self, request, obj=None):
"""
Return add_fieldsets if it is a new object and the form has specified
different fieldsets for creation vs. change. Otherwise punt.
"""
if not obj and hasattr(self, 'add_fieldsets'):
return self.add_fieldsets
return super(MediaAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Return a special add form if specified
"""
defaults = {}
if not obj and hasattr(self, 'add_form'):
defaults = {
'form': self.add_form
}
defaults.update(kwargs)
return super(MediaAdmin, self).get_form(request, obj, **defaults)
class ImageAdmin(MediaAdmin):
list_display = ('render_thumb', 'title', 'creation_date')
list_display_links = ('render_thumb', 'title', )
list_editable = tuple()
add_fieldsets = (
(_("Content"), {'fields': ('external_url', 'file', 'caption')}),
(_("Rights"), {'fields': ('public', 'reproduction_allowed')}),
(_("Additional Info"), {
'classes': ('collapse',),
'fields': ('title', 'creation_date', 'site')
})
)
add_form = ImageCreationForm
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(ImageAdmin, self).get_urls()
my_urls = patterns('',
(r'^(?P<image_id>\d+)/crops/add/$', self.add_crop),
(r'^(?P<image_id>\d+)/crops/(?P<object_id>\d+)/$', self.update_crop),
(r'^(?P<image_id>\d+)/crops/(?P<object_id>\d+)/delete/$', self.delete_crop),
url(r'^close/$', self.close_window, name="imagecustomsize_close"),
)
return my_urls + urls
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'file':
kwargs['widget'] = AdminImageWidget
kwargs.pop('request')
return db_field.formfield(**kwargs)
elif db_field.name == 'external_url':
kwargs['widget'] = AdminExternalURLWidget
kwargs.pop('request')
return db_field.formfield(**kwargs)
return super(ImageAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def add_crop(self, request, image_id):
from massmedia.views import ImageCustomSizeCreate
return ImageCustomSizeCreate.as_view()(request, image_id=image_id)
def delete_crop(self, request, image_id, object_id):
from massmedia.views import ImageCustomSizeDelete
return ImageCustomSizeDelete.as_view()(request, image_id=image_id, object_id=object_id)
def update_crop(self, request, image_id, object_id):
from massmedia.views import ImageCustomSizeUpdate
return ImageCustomSizeUpdate.as_view()(request, image_id=image_id, object_id=object_id)
def close_window(self, request):
from django.views.generic.base import TemplateView
return TemplateView.as_view(template_name='admin/massmedia/imagecustomsize/close_window.html')(request)
class VideoAdmin(MediaAdmin):
list_display = ('title', 'thumb', 'author_name', 'mime_type',
'public', 'creation_date')
fieldsets = (
|
(None, {'fields': ('title', 'caption')}),
(_("Content"), {'fields': (('file', 'external_url'), 'thumbnail')}),
(_("Credit"), {'fields': ('author', 'one_off_author', 'reproduction_allowed')}),
(_("Metadata"), {'fields': ('metadata', 'mime_type')}),
(_("Connections"), {'fields': ('public', 'site')}),
(_("W
|
idget"), {'fields': ('width', 'height')}),
(_("Advanced options"), {
'classes': ('collapse',),
'fields': ('widget_template',)
}),
)
raw_id_fields = ('thumbnail',)
add_fieldsets = (
(None, {'fields': ('title', )}),
(_("Content"), {'fields': (('external_url', 'f
|
LethusTI/supportcenter
|
vendor/mongoengine/mongoengine/django/mongo_auth/models.py
|
Python
|
gpl-3.0
| 3,378
| 0.000592
|
from django.conf import settings
from django.contrib.auth.models import UserManager
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
__all__ = (
'get_user_document',
)
MONGOENGINE_USER_DOCUMENT = getattr(
settings, 'MONGOENGINE_USER_DOCUMENT', 'mongoengine.django.auth.User')
def get_user_document():
"""Get the user document class used for authentication.
This is the class defined in settings.MONGOENGINE_USER_DOCUMENT, which
defaults to `mongoengine.django.auth.User`.
"""
name = MONGOENGINE_USER_DOCUMENT
dot = name.rindex('.')
module = import_module(name[:dot])
return getattr(module, name[dot + 1:])
class MongoUserManager(UserManager):
"""A User manager wich allows the use of MongoEngine documents in Django.
To use the manager, you must t
|
ell django.contrib.auth to use MongoUser as
    the user model. In your settings.py, you need:
INSTAL
|
LED_APPS = (
...
'django.contrib.auth',
'mongoengine.django.mongo_auth',
...
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
Django will use the model object to access the custom Manager, which will
replace the original queryset with MongoEngine querysets.
By default, mongoengine.django.auth.User will be used to store users. You
can specify another document class in MONGOENGINE_USER_DOCUMENT in your
settings.py.
The User Document class has the same requirements as a standard custom user
model: https://docs.djangoproject.com/en/dev/topics/auth/customizing/
In particular, the User Document class must define USERNAME_FIELD and
REQUIRED_FIELDS.
`AUTH_USER_MODEL` has been added in Django 1.5.
"""
def contribute_to_class(self, model, name):
super(MongoUserManager, self).contribute_to_class(model, name)
self.dj_model = self.model
self.model = get_user_document()
self.dj_model.USERNAME_FIELD = self.model.USERNAME_FIELD
username = models.CharField(_('username'), max_length=30, unique=True)
username.contribute_to_class(self.dj_model, self.dj_model.USERNAME_FIELD)
self.dj_model.REQUIRED_FIELDS = self.model.REQUIRED_FIELDS
for name in self.dj_model.REQUIRED_FIELDS:
field = models.CharField(_(name), max_length=30)
field.contribute_to_class(self.dj_model, name)
def get(self, *args, **kwargs):
try:
return self.get_query_set().get(*args, **kwargs)
except self.model.DoesNotExist:
# ModelBackend expects this exception
raise self.dj_model.DoesNotExist
@property
def db(self):
raise NotImplementedError
def get_empty_query_set(self):
return self.model.objects.none()
def get_query_set(self):
return self.model.objects
class MongoUser(models.Model):
""""Dummy user model for Django.
MongoUser is used to replace Django's UserManager with MongoUserManager.
The actual user document class is mongoengine.django.auth.User or any
other document class specified in MONGOENGINE_USER_DOCUMENT.
To get the user document class, use `get_user_document()`.
"""
objects = MongoUserManager()
|
mikedingjan/wagtail
|
wagtail/embeds/apps.py
|
Python
|
bsd-3-clause
| 347
| 0
|
from django.apps import AppConfig
from
|
django.utils.translation import ugettext_lazy as _
from .finders import get_finders
class WagtailEmbeds
|
AppConfig(AppConfig):
name = 'wagtail.embeds'
label = 'wagtailembeds'
verbose_name = _("Wagtail embeds")
def ready(self):
# Check configuration on startup
get_finders()
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/pointcloud/_meta.py
|
Python
|
mit
| 438
| 0
|
impor
|
t _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="pointcloud", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
**kw
|
args
)
|
arturcalves/dqc
|
dqc_django/wsgi.py
|
Python
|
mit
| 397
| 0
|
"""
WSGI config for dqc_django project.
It exposes the
|
WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "
|
dqc_django.settings")
application = get_wsgi_application()
|
Typecraft/norsourceparser
|
norsourceparser/core/rules.py
|
Python
|
mit
| 11,982
| 0.002921
|
# coding: utf-8
"""
This file contains methods for translating Norsource rules into rules we can
easily convert to a Typecraft-compatible format.
"""
import re
from norsourceparser.core.config import config
from norsourceparser.core.constants import REDUCED_RULE_POS, REDUCED_RULE_GLOSSES, REDUCED_RULE_MORPHOLOGICAL_BREAKUP, \
REDUCED_RULE_VALENCY, REDUCED_RULE_CITATION_FORM, REDUCED_RULE_CONSTRUCTION_FORM, REDUCED_RULE_PRIORITY_AMBIGUOUS, \
REDUCED_RULE_PRIORITY_MERGE, REDUCED_RULE_PRIORITY_DOMINATE
from norsourceparser.core.util import get_pos, get_inflectional_rules, get_valency, get_dominating_pos_rule, \
get_dominating_gloss_rule
from norsourceparser.core.util import split_lexical_entry, get_gloss
class Rule(object):
def __init__(
self,
rule_id,
va
|
lue,
priority=REDUCED_RULE_PRIORITY_AMBIGUOUS
):
self.rule_id = rule_id
self.value
|
= value
self.priority = priority
def __unicode__(self):
return u"%d %s (Priority %d)" % (self.rule_id, self.value, self.priority)
def get_rules_from_partial_branch(partial_branch):
"""
This method is the main `entry-point` for inferring rules from a branch.
The method will analyse the branch for POS and GLOSS-tags, and possibly morphological
breakups.
:param partial_branch: A list of branch-entries.
:return: Array of rules
"""
# If we are at the terminal, we do nothing just yet.
if len(partial_branch) < 2:
return
rules = []
second_node = partial_branch[1]
terminal = partial_branch[0]
# With the terminal and second node, we can get information
# from the lexical entry
[stem, pos, gloss] = split_lexical_entry(second_node.name)
pos = get_pos(pos, None) or get_pos(second_node.name, None)
gloss = get_gloss(gloss, None) or get_gloss(second_node.name, None)
    # If we are at the lexical-entry level in debug mode, report a missing POS
if len(partial_branch) == 2 and config.DEBUG:
if pos is None:
print("UNABLE TO FIND POS FOR RULE: %s" % second_node.name)
if len(partial_branch) == 2:
# If we only have access to the lexical entry, we return what rules
# we can from here.
# Verbs might yield some valency information here
if pos == "V":
rules.extend(get_verb_valency_rule(partial_branch))
rules.extend(parse_lexical_entry(terminal, stem, pos, gloss))
return rules
if 'bli_pass' in partial_branch[1].name:
# We look for the special case of a bli_pass case here
rules.extend(get_bli_passive_rules(partial_branch))
else:
rules.extend(get_gloss_rules_from_partial_branch(partial_branch))
rules.extend(get_dominating_rules(partial_branch))
if pos == "N":
# If the pos is a Noun, we look for the special noun inflectional rules
rules.extend(get_noun_inflectional_rule(partial_branch))
rules.extend(get_complex_rules(partial_branch))
return rules
def parse_lexical_entry(terminal, stem, pos, gloss):
"""
This method helps us to parse a lexical entry.
    To do this, we need the extracted stem, pos and gloss from the rule,
    as well as the terminal.
    :param terminal: The terminal node, which will contain the dictionary-form of the
word we are trying to associate rules with.
:param stem: The parsed stem-form of the word.
:param pos: The POS-tag of the word.
:param gloss: Any gloss-tags so far found of the word.
:return: An array of rules.
"""
rules = []
# Here we are parsing the lexical entry of the branch
if pos is not None:
rules.append(Rule(REDUCED_RULE_POS, pos))
# This happens on e.g. punctuations
if stem is not None and pos is None and gloss is None:
rules.append(Rule(REDUCED_RULE_POS, pos))
# We capture morphological breakup and glosses here.
# This information may get overwritten later up the tree/branch. Yet
# we still do this step in case we have some missing information later up the tree.
if pos in ['N', 'V', 'ADJ']:
rules.append(Rule(REDUCED_RULE_CITATION_FORM, stem))
if stem != terminal.name and stem in terminal.name:
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [stem, re.sub("^"+stem, "", terminal.name)]))
# We do this here so we can capture the correct position
if gloss is not None:
rules.append(Rule(REDUCED_RULE_GLOSSES, ["", gloss], REDUCED_RULE_PRIORITY_MERGE))
else:
if stem not in terminal.name:
# We have morphology, but it is non-concatenative
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [terminal.name]))
else:
# We have no morphology at all here, we don't have any inflections here.
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [stem]))
# We do this here so we can capture the correct position
if gloss is not None:
rules.append(Rule(REDUCED_RULE_GLOSSES, [gloss], REDUCED_RULE_PRIORITY_MERGE))
else:
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [terminal.name]))
if gloss is not None:
rules.append(Rule(REDUCED_RULE_GLOSSES, [gloss], REDUCED_RULE_PRIORITY_MERGE))
return rules
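# Worked example of the branch above (the word and tags are made up): for a
# terminal "hestene" whose lexical entry yields stem "hest", pos "N" and no
# gloss, the rules produced are a POS rule "N", a citation-form rule for
# "hest", and a morphological breakup ["hest", "ene"].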
def get_noun_inflectional_rule(partial_branch):
"""
This method helps us to parse an inflectional rule for a noun.
    The method accepts a partial branch, but only proceeds if the branch is at least
    of length 3. This flexibility keeps the calling methods from having to guard
    their calls to this method.
If the POS of the branch is found not to be a noun, we simply return.
:param partial_branch: A partial branch.
:return: An array, potentially filled with rules.
"""
rules = []
if len(partial_branch) < 3:
return rules
# Here we are looking for the inflectional rules for nouns
last_node = partial_branch[-1]
lexical_node = partial_branch[1]
terminal = partial_branch[0]
[stem, pos, _] = split_lexical_entry(lexical_node.name)
pos = get_pos(pos, None) or get_pos(lexical_node.name, None)
if pos != 'N':
return rules
inf_rules = get_inflectional_rules(stem, last_node.name)
if inf_rules is None:
return rules
[current_suffix, suffix, glosses] = inf_rules
if glosses is None and config.DEBUG:
print("NONE GLOSSES", glosses)
if current_suffix is None or suffix is None:
# This happens on the rule pl_ind_n_short_0_irule
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, [stem]))
rules.append(Rule(REDUCED_RULE_GLOSSES, [".".join(glosses)], REDUCED_RULE_PRIORITY_MERGE))
else:
if current_suffix == '*':
morphological_breakup = [stem, suffix]
glosses = ["", ".".join(glosses)]
else:
if current_suffix not in suffix:
morphological_breakup = [terminal.name]
glosses = ["".join(glosses)]
else:
morphological_breakup = [stem, re.sub("^"+current_suffix, "", suffix)]
glosses = ["", ".".join(glosses)]
rules.append(Rule(REDUCED_RULE_MORPHOLOGICAL_BREAKUP, morphological_breakup))
rules.append(Rule(REDUCED_RULE_GLOSSES, glosses, REDUCED_RULE_PRIORITY_MERGE))
return rules
def get_gloss_rules_from_partial_branch(partial_tree):
"""
Tries to get rules for something other than a verb, noun or adjective. We do this simply by doing a lookup
in the non-inflectional table. This is of course all encapsulated in the get_gloss method, so we just call that,
fishing for luck.
:param partial_tree:
:return: An array of rules
"""
last_rule = partial_tree[-1].name
lexical_rule = partial_tree[1].name
terminal = partial_tree[0].name
[stem, pos, _] = split_lexical_entry(lexical_rule)
pos = get_pos(pos, None) or get_pos(lexical_rule, None)
maybe_gloss = get_gloss(last_rule)
if maybe_gloss is not None:
if pos in ['N', 'ADJ', 'V']:
if stem != t
|
pmatigakis/Huginn
|
huginn/cli/huginn_record.py
|
Python
|
bsd-3-clause
| 2,622
| 0.000381
|
"""
The huginn_record script is used to record flight data from the simulator
"""
from argparse import ArgumentParser
import json
from twisted.internet import reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers
from twisted.internet.task import LoopingCall
from huginn import configuration
from huginn.io import CSVFDMDataWriter
def get_arguments():
parser = ArgumentParser(description="Record the fdm data")
parser.add_argument("--host",
action="store",
default="127.0.0.1",
help="the simulator ip address")
parser.add_argument("--port",
action="store",
default=configuration.WEB_SERVER_PORT,
type=int,
help="the simulator http port")
parser.add_argument("--dt",
default=1.0,
help="How often to request data from the simulator")
parser.add_argument("output", help="the output file")
return parser.parse_args()
def request_fdm_data(args, csv_telemetry_writer):
agent = Agent(reactor)
url = "http://%s:%d/fdm" % (args.host, args.port)
d = agent.request("GET",
url,
Headers({}),
None)
d.addCallback(process_fdm_data_response, csv_telemetry_writer)
return d
def process_fdm_data_response(response, csv_telemetry_writer):
d = readBody(response)
d.addCallback(save_fdm_data, csv_telemetry_writer)
return d
def save_fdm_data(body, csv_telemetry_writer):
fdm_data = json.loads(body)
csv_telemetry_writer.write_fdm_data(fdm_data)
for variable in ["time", "altitude", "airspeed", "heading"]:
print("%s\t%f" % (variable, fdm_data[varia
|
ble]))
print ("")
def main():
args = get_arguments()
output_file = open(args.output, "w")
variables = ["time", "dt", "latitude", "longitude", "altitude",
"airspeed", "heading", "x_acceleration", "y_acceleration",
"z_acceleration", "roll_rate", "pitch_rate", "yaw_rate",
"temperature", "static_pressure", "total_pressure",
"roll", "pitch", "thrust", "aileron", "elevator", "rudder",
"thrott
|
le"]
csv_telemetry_writer = CSVFDMDataWriter(variables, output_file)
csv_telemetry_writer.write_header()
task = LoopingCall(request_fdm_data, args, csv_telemetry_writer)
task.start(args.dt)
reactor.run() # @UndefinedVariable
output_file.close()
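# Illustrative invocation (host and interval shown are simply the argparse
# defaults; the output filename is a placeholder):
#   python huginn_record.py --host 127.0.0.1 --dt 1.0 flight.csv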
|
sbktechnology/trufil-frappe
|
frappe/config/setup.py
|
Python
|
mit
| 5,480
| 0.042336
|
from __future__ import unicode_literals
from frappe import _
from frappe.desk.moduleview import add_setup_section
def get_data():
data = [
{
"label": _("Users"),
"icon": "icon-group",
"items": [
{
"type": "doctype",
"name": "User",
"description": _("System and Website Users")
},
{
"type": "doctype",
"name": "Role",
"description": _("User Roles")
}
]
},
{
"label": _("Permissions"),
"icon": "icon-lock",
"items": [
{
"type": "page",
"name": "permission-manager",
"label": _("Role Permissions Manager"),
"icon": "icon-lock",
"description": _("Set Permissions on Document Types and Roles")
},
{
"type": "page",
"name": "user-permissions",
"label": _("User Permissions Manager"),
"icon": "icon-shield",
"description": _("Set Permissions per User")
},
{
"type": "page",
"name": "modules_setup",
"label": _("Show / Hide Modules"),
"icon": "icon-upload",
"description": _("Show or hide modules globally.")
},
{
"type": "report",
"is_query_report": True,
"doctype": "User",
"icon": "icon-eye-open",
"name": "Permitted Documents For User",
"description": _("Check which Documents are readable by a User")
},
{
"type": "report",
"doctype": "DocShare",
"icon": "icon-share",
"name": "Document Share Report",
"description": _("Report of all document shares")
}
]
},
{
"label": _("Settings"),
"icon": "icon-wrench",
"items": [
{
"type": "doctype",
"name": "System Settings",
"label": _("System Settings"),
"description": _("Language, Date and Time settings"),
"hide_count": True
},
{
"type": "doctype",
"name": "Scheduler Log",
"description": _("Log of error on automated events (scheduler).")
},
]
},
{
"label": _("Data"),
"icon": "icon-th",
"items": [
{
"type": "page",
"name": "data-import-tool",
"label": _("Import / Export Data"),
"icon": "icon-upload",
"description": _("Import / Export Data from .csv files.")
},
{
"type": "doctype",
"name": "Naming Series",
"description": _("Set numbering series for transactions."),
"hide_count": True
},
{
"type": "doctype",
"name": "Rename Tool",
"label": _("Bulk Rename"),
"description": _("Rename many items by uploading a .csv file."),
"hide_count": True
},
{
"type": "page",
"name": "backups",
"label": _("Download Backups"),
"description": _("List of backups available for download"),
"icon": "icon-download"
},
]
},
{
"label": _("Email"),
"icon": "icon-envelope",
"items": [
{
"type": "doctype",
"name": "Email Account",
"description": _("Add / Manage Email Accounts.")
},
{
"type": "doctype",
"name": "Email Alert",
"description": _("Setup Email Alert based on various criteria.")
},
{
"type": "doctype",
"name": "Standard Reply",
"description": _("Standard replies to common queries.")
},
]
},
{
"label": _("Printing"),
"icon": "icon-print",
"items": [
{
"type": "page",
"label": "Print Format Builder",
"name": "print-format-builder",
"description"
|
: _("Drag and Drop tool to build and customize Print Formats.")
},
{
"type": "doctype",
"name": "Print Settings",
"description": _("Set default format, page size, print s
|
tyle etc.")
},
{
"type": "doctype",
"name": "Print Format",
"description": _("Customized HTML Templates for printing transactions.")
},
]
},
{
"label": _("Workflow"),
"icon": "icon-random",
"items": [
{
"type": "doctype",
"name": "Workflow",
"description": _("Define workflows for forms.")
},
{
"type": "doctype",
"name": "Workflow State",
"description": _("States for workflow (e.g. Draft, Approved, Cancelled).")
},
{
"type": "doctype",
"name": "Workflow Action",
"description": _("Actions for workflow (e.g. Approve, Cancel).")
},
]
},
{
"label": _("Integrations"),
"icon": "icon-star",
"items": [
{
"type": "page",
"name": "applications",
"label": _("Application Installer"),
"description": _("Install Applications."),
"icon": "icon-download"
},
{
"type": "doctype",
"name": "Social Login Keys",
"description": _("Enter keys to enable login via Facebook, Google, GitHub."),
},
{
"type": "doctype",
"name": "Dropbox Backup",
"description": _("Manage cloud backups on Dropbox"),
"hide_count": True
}
]
},
{
"label": _("Customize"),
"icon": "icon-glass",
"items": [
{
"type": "doctype",
"name": "Customize Form",
"description": _("Change field properties (hide, readonly, permission etc.)"),
"hide_count": True
},
{
"type": "doctype",
"name": "Custom Field",
"description": _("Add fields to forms.")
},
{
"type": "doctype",
"name": "Custom Script",
"description": _("Add custom javascript to forms.")
},
{
"type": "doctype",
"name": "DocType",
"description": _("Add custom forms.")
}
]
},
]
add_setup_section(data, "frappe", "website", _("Website"), "icon-globe")
return data
|
eviljeff/zamboni
|
mkt/abuse/models.py
|
Python
|
bsd-3-clause
| 3,467
| 0
|
import logging
from django.conf import settings
from django.db import models
from mkt.site.mail import send_mail
from mkt.site.models import ModelBase
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
log = logging.getLogger('z.abuse')
class AbuseReport(ModelBase):
# NULL if the reporter is anonymous.
reporter = models.ForeignKey(UserProfile, null=True,
blank=True, related_name='abuse_reported')
ip_address = models.CharField(max_length=255, default='0.0.0.0')
# An abuse report can be for an addon, a user, or a website. Only one of
# these should be null.
addon = models.ForeignKey(Webapp, null=True, related_name='abuse_reports')
user = models.ForeignKey(UserProfile, null=True,
related_name='abuse_reports')
website = models.ForeignKey(Website, null=True,
related_name='abuse_reports')
message = models.TextField()
read = models.BooleanField(default=False)
class Meta:
db_table = 'abuse_reports'
@property
def object(self):
return self.addon or self.user or self.website
def send(self):
obj = self.object
if self.reporter:
user_name = '%s (%s)' % (self.reporter.name, self.reporter.email)
else:
user_name = 'An anonymous coward'
if self.addon:
type_ = 'App'
elif self.user:
type_ = 'User'
else:
type_ = 'Website'
subject = u'[%s] Abuse Report for %s' % (type_, obj.name)
msg = u'%s reported abuse for %s (%s%s).\n\n%s' % (
user_name, obj.name, settings.SITE_URL, obj.get_url_path(),
self.message)
se
|
nd_mail(subject,
|
msg, recipient_list=(settings.ABUSE_EMAIL,))
@classmethod
def recent_high_abuse_reports(cls, threshold, period, addon_id=None):
"""
        Returns AbuseReport objects whose report count exceeds the given
        threshold since the given period cutoff (the period is compared
        directly against the `created` column, so pass a datetime).
        Filters by addon_id if provided.
        E.g. more than 5 abuse reports per webapp in the past 7 days.
"""
abuse_sql = ['''
SELECT `abuse_reports`.*,
COUNT(`abuse_reports`.`addon_id`) AS `num_reports`
FROM `abuse_reports`
INNER JOIN `addons` ON (`abuse_reports`.`addon_id` = `addons`.`id`)
WHERE `abuse_reports`.`created` >= %s ''']
params = [period]
if addon_id:
abuse_sql.append('AND `addons`.`id` = %s ')
params.append(addon_id)
abuse_sql.append('GROUP BY addon_id HAVING num_reports > %s')
params.append(threshold)
return list(cls.objects.raw(''.join(abuse_sql), params))
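# Hypothetical usage of recent_high_abuse_reports (names and values are
# illustrative). The period argument is compared directly against the
# `created` column, so a datetime cutoff is what the query expects:
#   cutoff = datetime.datetime.now() - datetime.timedelta(days=7)
#   reports = AbuseReport.recent_high_abuse_reports(threshold=5, period=cutoff)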
def send_abuse_report(request, obj, message):
report = AbuseReport(ip_address=request.META.get('REMOTE_ADDR'),
message=message)
if request.user.is_authenticated():
report.reporter = request.user
if isinstance(obj, Webapp):
report.addon = obj
elif isinstance(obj, UserProfile):
report.user = obj
elif isinstance(obj, Website):
report.website = obj
report.save()
report.send()
# Trigger addon high abuse report detection task.
if isinstance(obj, Webapp):
from mkt.webapps.tasks import find_abuse_escalations
find_abuse_escalations.delay(obj.id)
|
kevinlee12/oppia
|
scripts/linters/css_linter_test.py
|
Python
|
apache-2.0
| 4,592
| 0.000653
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apa
|
che License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the
|
specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/linters/css_linter.py."""
from __future__ import annotations
import os
import subprocess
from core.tests import test_utils
from scripts import scripts_test_utils
from . import css_linter
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
CONFIG_PATH = os.path.join(
PARENT_DIR, 'oppia', 'core', 'templates', 'css', '.stylelintrc')
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_CSS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.css')
INVALID_CSS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'invalid.css')
class ThirdPartyCSSLintChecksManagerTests(test_utils.LinterTestBase):
"""Tests for ThirdPartyCSSLintChecksManager class."""
def test_all_filepaths_with_success(self):
filepaths = [VALID_CSS_FILEPATH, INVALID_CSS_FILEPATH]
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, filepaths)
returned_filepaths = third_party_linter.all_filepaths
self.assertEqual(returned_filepaths, filepaths)
def test_perform_all_lint_checks_with_invalid_file(self):
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [INVALID_CSS_FILEPATH])
lint_task_report = third_party_linter.lint_css_files()
self.assert_same_list_elements([
'19:16',
'Unexpected whitespace before \":\" declaration-colon-space-'
'before'], lint_task_report.get_report())
self.assertEqual('Stylelint', lint_task_report.name)
self.assertTrue(lint_task_report.failed)
def test_perform_all_lint_checks_with_invalid_stylelint_path(self):
def mock_join(*unused_args):
return 'node_modules/stylelint/bin/stylelinter.js'
join_swap = self.swap(os.path, 'join', mock_join)
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [INVALID_CSS_FILEPATH])
with self.print_swap, join_swap, self.assertRaisesRegexp(
Exception,
'ERROR Please run start.sh first to install node-eslint or '
'node-stylelint and its dependencies.'):
third_party_linter.perform_all_lint_checks()
def test_perform_all_lint_checks_with_stderr(self):
def mock_popen(unused_commands, stdout, stderr): # pylint: disable=unused-argument
return scripts_test_utils.PopenStub(stdout=b'True', stderr=b'True')
popen_swap = self.swap_with_checks(subprocess, 'Popen', mock_popen)
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [VALID_CSS_FILEPATH])
with self.print_swap, popen_swap, self.assertRaisesRegexp(
Exception, 'True'
):
third_party_linter.perform_all_lint_checks()
def test_perform_all_lint_checks_with_no_files(self):
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [])
lint_task_report = third_party_linter.perform_all_lint_checks()
self.assertEqual(
'There are no HTML or CSS files to lint.',
lint_task_report[0].get_report()[0])
self.assertEqual('CSS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_perform_all_lint_checks_with_valid_file(self):
third_party_linter = css_linter.ThirdPartyCSSLintChecksManager(
CONFIG_PATH, [VALID_CSS_FILEPATH])
lint_task_report = third_party_linter.perform_all_lint_checks()
self.assertTrue(isinstance(lint_task_report, list))
def test_get_linters(self):
custom_linter, third_party_linter = css_linter.get_linters(
CONFIG_PATH, [VALID_CSS_FILEPATH, INVALID_CSS_FILEPATH])
self.assertEqual(custom_linter, None)
self.assertTrue(
isinstance(
third_party_linter, css_linter.ThirdPartyCSSLintChecksManager))
|
akrherz/iem
|
scripts/dbutil/compute_hads_sts.py
|
Python
|
mit
| 2,162
| 0
|
"""Compute the archive start time of a HADS/DCP/COOP network"""
import sys
import datetime
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, logger
LOG = logger()
THISYEAR = datetime.datetime.now().year
HADSDB = get_dbconn("hads")
MESOSITEDB = get_dbconn("mesosite")
def get_minvalid(sid):
""" "Do sid"""
cursor = HADSDB.cursor()
for yr in range(2002, THISYEAR + 1):
cursor.execute(
f"SELECT min(valid) from raw{yr} WHERE station = %s", (sid,)
|
)
minv = cursor.fetchone()[0]
if minv is not None:
|
return minv
def do_network(network):
"""Do network"""
nt = NetworkTable(network)
for sid in nt.sts:
sts = get_minvalid(sid)
if sts is None:
continue
if (
nt.sts[sid]["archive_begin"] is None
or nt.sts[sid]["archive_begin"] != sts
):
osts = nt.sts[sid]["archive_begin"]
fmt = "%Y-%m-%d %H:%M"
LOG.info(
"%s [%s] new sts: %s OLD sts: %s",
sid,
network,
sts.strftime(fmt),
osts.strftime(fmt) if osts is not None else "null",
)
cursor = MESOSITEDB.cursor()
cursor.execute(
"UPDATE stations SET archive_begin = %s WHERE id = %s and "
"network = %s",
(sts, sid, network),
)
cursor.close()
MESOSITEDB.commit()
def main(argv):
"""Go main Go"""
if len(argv) == 1:
# If we run without args, we pick a "random" network!
cursor = MESOSITEDB.cursor()
cursor.execute(
"SELECT id from networks where id ~* 'DCP' or id ~* 'COOP' "
"ORDER by id ASC"
)
networks = []
for row in cursor:
networks.append(row[0])
jday = int(datetime.date.today().strftime("%j"))
network = networks[jday % len(networks)]
LOG.info("auto-picked %s", network)
else:
network = argv[1]
do_network(network)
if __name__ == "__main__":
main(sys.argv)
|
Rotendahl/DormitoryLife
|
cashier/migrations/0015_auto_20171108_1351.py
|
Python
|
gpl-3.0
| 552
| 0.001818
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-11-08 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
depende
|
ncies = [
('cashier', '0014_fix_transaction'),
]
operations = [
migrations.AlterField(
|
model_name='transaction',
name='typeOfTransaction',
field=models.CharField(choices=[('debt', 'Gæld'), ('expense', 'Udlæg')], default='debt', max_length=7, verbose_name='Type'),
),
]
|
mokieyue/mopidy
|
mopidy/core/listener.py
|
Python
|
apache-2.0
| 5,283
| 0
|
from __future__ import absolute_import, unicode_literals
from mopidy import listener
class CoreListener(listener.Listener):
"""
Marker interface for recipients of events sent by the core actor.
Any Pykka actor that mixes in this class will receive calls to the methods
defined here when the corresponding events happen in the core actor. This
interface is used both for looking up what actors to notify of the events,
and for providing default implementations for those listeners that are not
interested in all events.
"""
@staticmethod
def send(event, **kwargs):
"""Helper to allow calling of core listener events"""
listener.send(CoreListener, event, **kwargs)
def on_event(self, event, **kwargs):
"""
Called on all events.
*MAY* be implemented by actor. By default, this method forwards the
event to the specific event methods.
:param event: the event name
:type event: string
:param kwargs: any other arguments to the specific event handlers
"""
# Just delegate to parent, entry mostly for docs.
super(CoreListener, self).on_event(event, **kwargs)
def track_playback_paused(self, tl_track, time_position):
"""
Called whenever track playback is paused.
*MAY* be implemented by actor.
:param tl_track: the track that was playing when playback paused
:type tl_track: :class:`mopidy.models.TlTrack`
:param time_position: the time position in milliseconds
:type time_position: int
"""
pass
def track_playback_resumed(self, tl_track, time_position):
"""
Called whenever track playback is resumed.
*MAY* be implemented by actor.
:param tl_track: the track that was playing when playback resumed
:type tl_track: :class:`mopidy.models.TlTrack`
:param time_position: the time position in milliseconds
:type time_position: int
"""
pass
def track_playback_started(self, tl_tr
|
ack):
"""
Called whenever a new track starts playing.
*MAY* be implemented by actor.
:param tl_track: the track that just started playing
:type t
|
l_track: :class:`mopidy.models.TlTrack`
"""
pass
def track_playback_ended(self, tl_track, time_position):
"""
Called whenever playback of a track ends.
*MAY* be implemented by actor.
:param tl_track: the track that was played before playback stopped
:type tl_track: :class:`mopidy.models.TlTrack`
:param time_position: the time position in milliseconds
:type time_position: int
"""
pass
def playback_state_changed(self, old_state, new_state):
"""
Called whenever playback state is changed.
*MAY* be implemented by actor.
:param old_state: the state before the change
:type old_state: string from :class:`mopidy.core.PlaybackState` field
:param new_state: the state after the change
:type new_state: string from :class:`mopidy.core.PlaybackState` field
"""
pass
def tracklist_changed(self):
"""
Called whenever the tracklist is changed.
*MAY* be implemented by actor.
"""
pass
def playlists_loaded(self):
"""
Called when playlists are loaded or refreshed.
*MAY* be implemented by actor.
"""
pass
def playlist_changed(self, playlist):
"""
Called whenever a playlist is changed.
*MAY* be implemented by actor.
:param playlist: the changed playlist
:type playlist: :class:`mopidy.models.Playlist`
"""
pass
def playlist_deleted(self, uri):
"""
Called whenever a playlist is deleted.
*MAY* be implemented by actor.
:param uri: the URI of the deleted playlist
:type uri: string
"""
pass
def options_changed(self):
"""
Called whenever an option is changed.
*MAY* be implemented by actor.
"""
pass
def volume_changed(self, volume):
"""
Called whenever the volume is changed.
*MAY* be implemented by actor.
:param volume: the new volume in the range [0..100]
:type volume: int
"""
pass
def mute_changed(self, mute):
"""
Called whenever the mute state is changed.
*MAY* be implemented by actor.
:param mute: the new mute state
:type mute: boolean
"""
pass
def seeked(self, time_position):
"""
        Called whenever the time position changes by an unexpected amount, e.g.
        when seeking to a new time position.
*MAY* be implemented by actor.
:param time_position: the position that was seeked to in milliseconds
:type time_position: int
"""
pass
def stream_title_changed(self, title):
"""
Called whenever the currently playing stream title changes.
*MAY* be implemented by actor.
:param title: the new stream title
:type title: string
"""
pass
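# Illustrative listener (an assumption, not part of this module): a frontend
# actor mixes CoreListener in and overrides only the events it cares about,
# e.g.
#   class MyFrontend(pykka.ThreadingActor, CoreListener):
#       def track_playback_started(self, tl_track):
#           print(tl_track.track.name)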
|
BanzaiTokyo/akihabara-tokyo
|
askapp/migrations/0013_auto_20170206_0748.py
|
Python
|
apache-2.0
| 542
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('askapp', '0012_au
|
to_20170203_1436'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='the_answer',
new_name='is_answer',
),
migrations.AddField(
model_name='post',
na
|
me='accepted',
field=models.DateTimeField(null=True),
),
]
|
joshuamckenty/yolo-octo-wookie
|
nova/endpoint/images.py
|
Python
|
apache-2.0
| 2,782
| 0.002516
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
#
|
Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" B
|
ASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Proxy AMI-related calls from the cloud controller, to the running
objectstore daemon.
"""
import json
import random
import urllib
from nova import vendor
import boto
import boto.s3
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
def modify(context, image_id, operation):
conn(context).make_request(
method='POST',
bucket='_images',
query_args=qs({'image_id': image_id, 'operation': operation}))
return True
def register(context, image_location):
""" rpc call to register a new image based from a manifest """
image_id = utils.generate_uid('ami')
conn(context).make_request(
method='PUT',
bucket='_images',
query_args=qs({'image_location': image_location,
'image_id': image_id}))
return image_id
def list(context, filter_list=[]):
""" return a list of all images that a user can see
optionally filtered by a list of image_id """
# FIXME: send along the list of only_images to check for
response = conn(context).make_request(
method='GET',
bucket='_images')
result = json.loads(response.read())
if not filter_list is None:
return [i for i in result if i['imageId'] in filter_list]
return result
def deregister(context, image_id):
""" unregister an image """
conn(context).make_request(
method='DELETE',
bucket='_images',
query_args=qs({'image_id': image_id}))
def conn(context):
return boto.s3.connection.S3Connection (
aws_access_key_id='%s:%s' % (context.user.access, context.project.name),
aws_secret_access_key=context.user.secret,
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
port=FLAGS.s3_port,
host=FLAGS.s3_host)
def qs(params):
pairs = []
for key in params.keys():
pairs.append(key + '=' + urllib.quote(params[key]))
return '&'.join(pairs)
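# Worked example of qs() above (values are illustrative):
#   qs({'image_id': 'ami-12345', 'operation': 'add'})
# yields the pairs joined with '&', e.g. 'image_id=ami-12345&operation=add'
# (pair order follows dict iteration).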
|
grahame/ealgis
|
django/ealgis/dataschema/schema_v1.py
|
Python
|
gpl-3.0
| 3,861
| 0.001554
|
from sqlalchemy.schema import (
Table,
Column,
MetaData,
ForeignKey)
from sqlalchemy.types import (
Text,
JSON,
DateTime,
Integer,
String)
from collections import defaultdict
from uuid import uuid4
import datetime
class SchemaStore:
def __init__(self):
self.metadata = defaultdict(MetaData)
self.tables = defaultdict(list)
def _import_schema(self, schema_name):
def fkey(target):
return ForeignKey(schema_name + '.' + target)
def make_uuid():
return str(uuid4())
metadata = self.metadata[schema_name]
tables = self.tables[schema_name]
tables.append(Table(
"ealgis_metadata", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256), nullable=False),
Column('family', String(256), nullable=True),
Column('uuid', String(36), nullable=False, default=make_uuid),
Column('description', Text(), nullable=False),
Column('date_created', DateTime(timezone=True), default=datetime.datetime.utcnow, nullable=False),
Column('date_published', DateTime(timezone=True), nullable=False),
schema=schema_name))
tables.append(Table(
"dependencies", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256), nullable=False),
Column('uuid', String(36), nullable=False),
schema=schema_name))
tables.append(Table(
"table_info", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256)),
Column('metadata_json', JSON()),
schema=schema_name))
tables.append(Table(
"column_info", metadata,
Column('id', Integer, primary_key=True),
Column('table_info_id', Integer, fkey('table_info.id'), nullable=False),
Column('name', String(256)),
Column('schema_name', String(256)),
Column('metadata_json', JSON()),
schema=schema_name))
tables.append(Table(
"geometry_source", metadata,
Column('id', Integer, primary_key=True),
Column('table_info_id', Integer, fkey('table_info.id'), nullable=False),
Column('gid_column', String(256)),
Column('geometry_type', String(256)),
schema=schema_name))
tables.append(Table(
"geometry_source_projection", metadata,
Column('id', Integer, primary_key=True),
Column('geometry_source_id', Integer, fkey('table_info.id'), nullable=False),
Column('geometry_column', String(256)),
Column('srid', Integer),
schema=schema_name))
tables.append(Table(
"geometry_linkage", metadata,
|
Column('id', Integer, primary_key=True),
# in the source schema: may not be the same schema as this Table instance
Column('geometry_source_schema_name', St
|
ring, nullable=False),
Column('geometry_source_id', Integer, nullable=False),
# these must be in this schema
Column('attr_table_id', Integer, fkey('table_info.id'), nullable=False),
Column('attr_column', String(256)),
schema=schema_name))
tables.append(Table(
"mailbox", metadata,
Column('id', Integer, primary_key=True),
Column('from', String(256)),
Column('to', String(256)),
Column('message', JSON()),
schema=schema_name))
def load_schema(self, schema_name):
if schema_name not in self.metadata:
self._import_schema(schema_name)
return self.metadata[schema_name], self.tables[schema_name]
store = SchemaStore()
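# Illustrative use of the module-level store above (the schema name is a
# placeholder):
#   metadata, tables = store.load_schema('aus_census_2011')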
|
slackapi/python-slackclient
|
slack_sdk/__init__.py
|
Python
|
mit
| 1,429
| 0.002099
|
"""
* The SDK website: https://slack.dev/python-slack-sdk/
* PyPI package: https://pypi.org/project/slack-sdk/
Here is the list of key modules in this SDK:
#### Web API Client
* Web API client: `slack_sdk.web.client`
* asyncio-based Web API client: `slack_sdk.web.async_client`
#### Webhook / response_url Client
* Webhook client: `slack_sdk.webhook.client`
* asyncio-based Webhook client: `slack_sdk.webhook.async_client`
#### Socket Mode Client
* The built-in Socket Mode client: `slack_sdk.socket_mode.builtin.client`
* [aiohttp](https://pypi.org/project/aiohttp/) based client: `slack_sdk.socket_mode.aiohttp`
* [websocket_client](https://pypi.org/project/websocket-client/) based client: `slack_sdk.socket_mode.websocket_client`
* [websockets](https://pypi.org/project/websockets/) based client: `slack_sdk.socket_mode.websockets`
#### OAuth
* `slack_sdk.oauth.installation_store.installation_store`
* `slack_sdk.oauth.state_store`
#### Audit Logs API Client
* `slack_sdk.audit_logs.v1.client`
* `slack_sdk.audit_logs.v1.async_client`
#### SCIM API Client
* `slack_sdk.scim.v1.client`
* `slack_sdk.scim.v1.async_client`
"""
import logging
from logging import NullHandler
# from .rtm import RTMClient # noqa
from .web import WebClient # noqa
from .webhook import WebhookClient # noqa
# Set default logging handler to avoid "No handler found" warnin
|
gs.
logging.getLogger(__name__).
|
addHandler(NullHandler())
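# Minimal illustrative use of the Web API client re-exported above (token and
# channel are placeholders):
#   from slack_sdk import WebClient
#   client = WebClient(token="xoxb-...")
#   client.chat_postMessage(channel="#general", text="Hello from slack_sdk")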
|
uclouvain/osis
|
base/migrations/0063_populate_uuid_values.py
|
Python
|
agpl-3.0
| 969
| 0.002064
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-05 09:13
from __future__ import unicode_literals
import uuid
from django.core.exceptions import FieldDoesNotExist
from django.db import migrations
def set_uuid_field(apps, schema_editor):
"""
    Set a random uuid value on all existing rows of every model that has a 'uuid' attribute in the database.
"""
base = apps.get_app_config('base')
for model_class in base.get_models():
ids = model_class.objects.values_list('id', flat=True)
if ids:
for pk in ids:
try:
model_class.objects.filter(pk=pk).update(uuid=uuid.uuid4())
except FieldDoesNotExist:
break
class Migration(migrations.Migration):
dependencies =
|
[
('base', '0062_add_uuid_field'),
|
]
operations = [
migrations.RunPython(set_uuid_field, elidable=True, reverse_code=migrations.RunPython.noop),
]
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/pyqtgraph/multiprocess/parallelizer.py
|
Python
|
gpl-3.0
| 12,494
| 0.014647
|
# -*- coding: utf-8 -*-
import os
|
, sys, time, multiprocessing, re
from .processes import ForkedProcess
from .remoteproxy import ClosedError
from ..python2_3 import basestring, xrange
class CanceledError(Exception):
"""Raised when the progress dialog is canceled during a processing operation."""
pass
class Parallelize(object):
"""
Class for ultra-simple inline paralleliza
|
tion on multi-core CPUs
Example::
## Here is the serial (single-process) task:
tasks = [1, 2, 4, 8]
results = []
for task in tasks:
result = processTask(task)
results.append(result)
print(results)
## Here is the parallelized version:
tasks = [1, 2, 4, 8]
results = []
with Parallelize(tasks, workers=4, results=results) as tasker:
for task in tasker:
result = processTask(task)
tasker.results.append(result)
print(results)
The only major caveat is that *result* in the example above must be picklable,
since it is automatically sent via pipe back to the parent process.
"""
def __init__(self, tasks=None, workers=None, block=True, progressDialog=None, randomReseed=True, **kwds):
"""
=============== ===================================================================
**Arguments:**
tasks list of objects to be processed (Parallelize will determine how to
distribute the tasks). If unspecified, then each worker will receive
a single task with a unique id number.
workers number of worker processes or None to use number of CPUs in the
system
progressDialog optional dict of arguments for ProgressDialog
to update while tasks are processed
randomReseed If True, each forked process will reseed its random number generator
to ensure independent results. Works with the built-in random
and numpy.random.
kwds objects to be shared by proxy with child processes (they will
appear as attributes of the tasker)
=============== ===================================================================
"""
## Generate progress dialog.
## Note that we want to avoid letting forked child processes play with progress dialogs..
self.showProgress = False
if progressDialog is not None:
self.showProgress = True
if isinstance(progressDialog, basestring):
progressDialog = {'labelText': progressDialog}
from ..widgets.ProgressDialog import ProgressDialog
self.progressDlg = ProgressDialog(**progressDialog)
if workers is None:
workers = self.suggestedWorkerCount()
if not hasattr(os, 'fork'):
workers = 1
self.workers = workers
if tasks is None:
tasks = range(workers)
self.tasks = list(tasks)
self.reseed = randomReseed
self.kwds = kwds.copy()
self.kwds['_taskStarted'] = self._taskStarted
def __enter__(self):
self.proc = None
if self.workers == 1:
return self.runSerial()
else:
return self.runParallel()
def __exit__(self, *exc_info):
if self.proc is not None: ## worker
exceptOccurred = exc_info[0] is not None ## hit an exception during processing.
try:
if exceptOccurred:
sys.excepthook(*exc_info)
finally:
#print os.getpid(), 'exit'
os._exit(1 if exceptOccurred else 0)
else: ## parent
if self.showProgress:
try:
self.progressDlg.__exit__(None, None, None)
except Exception:
pass
def runSerial(self):
if self.showProgress:
self.progressDlg.__enter__()
self.progressDlg.setMaximum(len(self.tasks))
self.progress = {os.getpid(): []}
return Tasker(self, None, self.tasks, self.kwds)
def runParallel(self):
self.childs = []
## break up tasks into one set per worker
workers = self.workers
chunks = [[] for i in xrange(workers)]
i = 0
for i in range(len(self.tasks)):
chunks[i%workers].append(self.tasks[i])
## fork and assign tasks to each worker
for i in range(workers):
proc = ForkedProcess(target=None, preProxy=self.kwds, randomReseed=self.reseed)
if not proc.isParent:
self.proc = proc
return Tasker(self, proc, chunks[i], proc.forkedProxies)
else:
self.childs.append(proc)
## Keep track of the progress of each worker independently.
self.progress = dict([(ch.childPid, []) for ch in self.childs])
## for each child process, self.progress[pid] is a list
## of task indexes. The last index is the task currently being
## processed; all others are finished.
try:
if self.showProgress:
self.progressDlg.__enter__()
self.progressDlg.setMaximum(len(self.tasks))
## process events from workers until all have exited.
activeChilds = self.childs[:]
self.exitCodes = []
pollInterval = 0.01
while len(activeChilds) > 0:
waitingChildren = 0
rem = []
for ch in activeChilds:
try:
n = ch.processRequests()
if n > 0:
waitingChildren += 1
except ClosedError:
#print ch.childPid, 'process finished'
rem.append(ch)
if self.showProgress:
self.progressDlg += 1
#print "remove:", [ch.childPid for ch in rem]
for ch in rem:
activeChilds.remove(ch)
while True:
try:
pid, exitcode = os.waitpid(ch.childPid, 0)
self.exitCodes.append(exitcode)
break
except OSError as ex:
if ex.errno == 4: ## If we get this error, just try again
continue
#print "Ignored system call interruption"
else:
raise
#print [ch.childPid for ch in activeChilds]
if self.showProgress and self.progressDlg.wasCanceled():
for ch in activeChilds:
ch.kill()
raise CanceledError()
## adjust polling interval--prefer to get exactly 1 event per poll cycle.
if waitingChildren > 1:
pollInterval *= 0.7
elif waitingChildren == 0:
pollInterval /= 0.7
pollInterval = max(min(pollInterval, 0.5), 0.0005) ## but keep it within reasonable limits
time.sleep(pollInterval)
finally:
if self.showProgress:
self.progressDlg.__exit__(None, None, None)
for ch in self.childs:
ch.join()
if len(self.exitCodes) < len(self.childs):
raise Exception("Parallelizer started %d processes but only received exit codes from %d." % (len(self.childs), len(self.exitCodes)))
for code in self.exitCodes:
if code != 0:
                raise Exception("Error occurred in parallel-executed subprocess (console output may have more information).")
|
sidharta/hansel-app
|
main/auth/yahoo.py
|
Python
|
mit
| 2,275
| 0
|
# coding: utf-8
# pylint: disable=missing-docstring, invalid-name
import flask
import auth
import config
import model
from main import app
yahoo_config = dict(
access_token_url='https://api.login.yahoo.com/oauth/v2/get_token',
authorize_url='https://api.login.yahoo.com/oauth/v2/request_auth',
base_url='https://query.yahooapis.com/',
consumer_key=config.CONFIG_DB.auth_yahoo_id,
    consumer_secret=config.CONFIG_DB.auth_yahoo_secret,
request_token_url='https://api.login.yahoo.com/oauth/v2/get_request_token',
)
yahoo = auth.create_oauth_app(yahoo_config, 'yahoo')
@app.route('/_s/callback/yahoo/oauth-authorized/')
def yahoo_authorized():
response = yahoo.authorized_response()
if response is None:
        flask.flash('You denied the request to sign in.')
return flask.redirect(flask.url_for('index'))
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
fields = 'guid, emails, familyName, givenName, nickname'
me = yahoo.get(
'/v1/yql',
data={
'format': 'json',
'q': 'select %s from social.profile where guid = me;' % fields,
'realm': 'yahooapis.com',
},
)
user_db = retrieve_user_from_yahoo(me.data['query']['results']['profile'])
return auth.signin_via_social(user_db)
@yahoo.tokengetter
def get_yahoo_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/yahoo/')
def signin_yahoo():
return auth.signin_oauth(yahoo)
def retrieve_user_from_yahoo(response):
auth_id = 'yahoo_%s' % response['guid']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
names = [response.get('givenName', ''), response.get('familyName', '')]
emails = response.get('emails', {})
if not isinstance(emails, list):
emails = [emails]
emails = [e for e in emails if 'handle' in e]
emails.sort(key=lambda e: e.get('primary', False))
email = emails[0]['handle'] if emails else ''
return auth.create_or_get_user_db(
auth_id=auth_id,
name=' '.join(names).strip() or response['nickname'],
username=response['nickname'],
email=email,
verified=True,
)
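# Hedged illustration (not from the original code; the values are hypothetical):
# the profile dict passed to retrieve_user_from_yahoo is expected to look
# roughly like
#
#   {
#       'guid': 'ABC123',
#       'givenName': 'Jane', 'familyName': 'Doe', 'nickname': 'jdoe',
#       'emails': [{'handle': 'jane@example.com', 'primary': True}],
#   }
#
# i.e. the fields selected from the social.profile YQL table in yahoo_authorized().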
|
great-expectations/great_expectations
|
tests/datasource/batch_kwarg_generator/test_s3_subdir_reader_generator.py
|
Python
|
apache-2.0
| 3,834
| 0.001826
|
import os
import time
import pandas as pd
import pytest
import requests
from botocore.session import Session
from great_expectations.datasource.batch_kwargs_generator import (
S3SubdirReaderBatchKwargsGenerator,
)
port = 5555
url_host = os.getenv("GE_TEST_LOCALHOST_URL", "127.0.0.1")
endpoint_uri = f"http://{url_host}:%s/" % port
os.environ["AWS_ACCESS_KEY_ID"] = "dummy_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret"
@pytest.fixture(scope="module")
def s3_base():
# writable local S3 system
import shlex
import subprocess
proc = subprocess.Popen(shlex.split("moto_server s3 -p %s" % port))
timeout = 5
while timeout > 0:
try:
r = requests.get(endpoint_uri)
if r.ok:
break
except:
pass
timeout -= 0.1
time.sleep(0.1)
yield
proc.terminate()
proc.wait()
@pytest.fixture(scope="module")
def mock_s3_bucket(s3_base):
bucket = "test_bucket"
session = Session()
client = session.create_client("s3", endpoint_url=endpoint_uri)
client.create_bucket(Bucket=bucket, ACL="public-read")
df = pd.DataFrame({"c1": [1, 2, 3], "c2": ["a", "b", "c"]})
keys = [
"data/for/you.csv",
"data/for/me.csv",
]
for key in keys:
client.put_object(
Bucket=bucket, Body=df.to_csv(index=None).encode("utf-8"), Key=key
)
yield bucket
@pytest.fixture
def s3_subdir_generator(mock_s3_bucket, basic_sparkdf_datasource):
# We configure a generator that will fetch from (mocked) my_bucket
# and will use glob patterns to match returned assets into batches of the same asset
try:
generator = S3SubdirReaderBatchKwargsGenerator(
"my_generator",
datasource=basic_sparkdf_datasource,
boto3_options={"endpoint_url": endpoint_uri},
base_directory="test_bucket/data/for",
reader_options={"sep": ","},
)
yield generator
except ImportError as e:
pytest.skip(str(e))
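# Hedged note (illustration only, not quoted from the library docs): a generator
# configured this way is expected to yield batch kwargs roughly of the form
#
#   {"s3": "s3a://test_bucket/data/for/you.csv", "reader_options": {"sep": ","}}
#
# which is what the assertions in the tests below rely on.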
@pytest.fixture
def s3_subdir_generator_with_partition(mock_s3_bucket, basic_sparkdf_datasource):
# We configure a generator that will fetch from (mocked) my_bucket
# and will use glob patterns to match returned assets into batches of the same asset
try:
generator = S3SubdirReaderBatchKwargsGenerator(
"my_generator",
datasource=basic_sparkdf_datasource,
boto3_options={"endpoint_url": endpoint_uri},
base_directory="test_bucket/data/",
reader_options={"sep": ","},
)
yield generator
except ImportError as e:
pytest.skip(str(e))
def test_s3_subdir_generator_basic_operation(s3_subdir_generator):
# S3 Generator sees *only* configured assets
assets = s3_subdir_generator.get_available_data_asset_names()
print(assets)
assert set(assets["names"]) == {
("you", "file"),
("me", "file"),
}
def test_s3_subdir_generator_reader_options_configuration(s3_subdir_generator):
batch_kwargs_list = [
kwargs
for kwargs in s3_subdir_generator.get_iterator(data_asset_name="you", limit=10)
]
print(batch_kwargs_list)
assert batch_kwargs_list[0]["reader_options"] == {"sep": ","}
def test_s3_subdir_generator_build_batch_kwargs_no_partition_id(s3_subdir_generator):
batch_kwargs = s3_subdir_generator.build_batch_kwargs("you")
assert batch_kwargs["s3"] in [
"s3a://test_bucket/data/for/you.csv",
]
def test_s3_subdir_generator_build_batch_kwargs_partition_id(
s3_subdir_generator_with_partition, basic_sparkdf_datasource
):
batch_kwargs = s3_subdir_generator_with_partition.build_batch_kwargs("for", "you")
assert batch_kwargs["s3"] == "s3a://test_bucket/data/for/you.csv"
|
npo-poms/pyapi
|
npoapi/utils.py
|
Python
|
gpl-3.0
| 1,076
| 0.003717
|
import os
import logging
import re
from typing import Final
logger: Final = logging.getLogger("Npo.Utils")
pattern: Final = re.compile('[a-z0-9]{2,}', re.IGNORECASE)
def looks_like_form(form: str):
"""
    Checks whether the given string looks like a form, i.e. whether it represents JSON, XML, an existing file, or stdin ('-').
    If not, it can instead be interpreted as free text for a search.
"""
if form.startswith("{") or form.startswith("<"):
logger.debug("Detected a string that look like either json or xml")
return True
if os.path.isfile(form):
logger.debug("Detected existing file %s" % form)
return True
if form.endswith(".json") or form.endswith(".xml"):
logger.warning("Form %s looks like a file name, but it is not a file." %
|
form)
|
return True
if form == "-":
logger.debug("Detected explicit stdin")
return True
if not pattern.match(form):
logger.warning("Form does not look like a credible text search. It doesn't look like a file either though")
return False
return False
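# Hedged examples (illustration only; the file names are hypothetical):
#
#   looks_like_form('{"title": "x"}')     # -> True  (looks like JSON)
#   looks_like_form('<mediaItem/>')       # -> True  (looks like XML)
#   looks_like_form('item.xml')           # -> True, with a warning if no such file exists
#   looks_like_form('-')                  # -> True  (explicit stdin)
#   looks_like_form('some search text')   # -> False (treated as a text search)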
|
SP2LC/procon25-main
|
A-star/L-dynamic.py
|
Python
|
apache-2.0
| 17,085
| 0.024341
|
# -*- coding: utf-8 -*-
import pairing_heap as pheap
from copy import deepcopy,copy
import threading
import Queue
import requests
from requests.auth import HTTPDigestAuth
import json
import sys
import communication
import config
import time
import L_sprit
# Declaration of global variables
LIMIT_SELECTION = 0
SELECTON_RATE = 0
EXCHANGE_RATE = 0
MODE_CHANGE_THRESHOLD = 0.50
ALL_COST = 0
columns = 0
rows = 0
mode_flag = "N"
fwd_ahead = []
back_ahead = []
thresh = MODE_CHANGE_THRESHOLD
class Node :
def __init__ (self, board, selection,exchange,distance):
self.board = board
self.selection = selection
self.exchange = exchange
self.mydistance = distance
    def get_next_nodes(self): # Returns the nodes adjacent to the given node
nodes_dic = {}
board = self.board
        for i in range(len(board)): # Add nodes with a changed selected cell to the queue.
for j in range(len(board[0])):
x,y = (i,j)
                # Swap with the piece to the right
nodes_dic[((i,j),"R")] = Node(exchange(board,(x, y), (x + 1, y)) , (x + 1, y),(x,y),0)
                # Swap with the piece to the left
if x == 0:
                    # No move to the left exists
nodes_dic[((i,j),"L")] = Node(None, (x - 1, y), (x,y),0)
else:
                    # Reuse the R result of the selection one cell to the left
#nodes_dic[((i,j),"L")] = Node(exchange(board,(x, y), (x - 1, y)) , (x - 1, y))
nodes_dic[((i,j),"L")] = Node(nodes_dic[((i - 1, j), "R")].board, (x - 1, y), (x, y),0)
                # Swap with the piece above
if y == 0:
                    # No move upward exists
nodes_dic[((i,j),"U")] = Node(None, (x, y - 1), (x,y), 0)
else:
                    # Reuse the D result of the selection one cell above
#nodes_dic[((i,j),"U")] = Node(exchange(board,(x, y), (x, y - 1)) , (x, y - 1))
nodes_dic[((i,j),"U")] = Node(nodes_dic[((i, j - 1), "D")].board, (x, y - 1), (x,y), 0)
                # Swap with the piece below
nodes_dic[((i,j),"D")] = Node(exchange(board,(x, y), (x, y + 1)) , (x, y + 1),(x,y),0)
return nodes_dic
def make_problem(w, h):
arr = []
for i in range(w):
column = []
for j in range(h):
column.append((i, j))
arr.append(column)
return arr
def transpose(arr2d): # Returns the transposed 2-D array
result = []
for i in range(len(arr2d[0])):
arr = []
for j in range(len(arr2d)):
arr.append(arr2d[j][i])
result.append(arr)
return result
def operations_to_list(operations): # Converts operations back into an ordinary list and returns it
pair = operations
lst = []
while pair != ():
lst.append(pair[0])
pair = pair[1]
return lst
def exchange (then_board, start, destination): # Returns a board with start and destination of then_board swapped
    # Deep-copy only the modified columns
x, y = start
new_x, new_y = destination
if not(0 <= new_x < len(then_board) and 0 <= new_y < len(then_board[0])):
return None
startImg = then_board[x][y]
destImg = then_board[new_x][new_y]
return [
then_board[x] if x != start[0] and x != destination[0]
else [destImg if (x, y) == start
else (startImg if (x, y) == destination else then_board[x][y])
for y in range(len(then_board[0]))]
for x in range(len(then_board))]
board = copy(then_board)
board[x] = deepcopy(then_board[x])
if x != new_x:
board[new_x] = deepcopy(then_board[new_x])
destination_element = board[new_x][new_y]
board[new_x][new_y] = board[x][y]
board[x][y] = destination_element
return board
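# Hedged example (illustration only): exchange returns a new board with the two
# given cells swapped and leaves the original board untouched.
#
#   b = make_problem(2, 2)          # [[(0, 0), (0, 1)], [(1, 0), (1, 1)]]
#   exchange(b, (0, 0), (1, 0))     # -> [[(1, 0), (0, 1)], [(0, 0), (1, 1)]]
#   exchange(b, (1, 0), (2, 0))     # -> None (destination is outside the board)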
def create_distance_table(goal): # Returns a table used for distance calculation
table = []
for i in range(len(goal)):
col = []
for j in range(len(goal[0])):
            col.append(None)
table.append(col)
for i in range(len(goal)):
for j in range(len(goal[0])):
(goal_x, goal_y) = goal[i][j]
table[goal_x][goal_y] = (i, j)
return table
def distance_to_goal(table, board): # Returns the estimated distance from a node to the goal node. Arguments: (distance calculation table, board)
ans = 0
for i in range(len(board)):
for j in range(len(board[0])):
(board_x, board_y) = board[i][j]
            a = table[board_x][board_y]
b = (i, j)
x = abs(a[0] - b[0])
y = abs(a[1] - b[1])
ans += x + y
return ans * EXCHANGE_RATE
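# Hedged example (illustration only): for a solved board the distance table maps
# every piece to its own position, so the estimated distance is zero.
#
#   goal = make_problem(2, 2)
#   table = create_distance_table(goal)   # table[x][y] == (x, y) for every cell
#   distance_to_goal(table, goal)         # -> 0 (no misplaced pieces)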
def point_md(point,board, table):
table_x, table_y = board[point[0]][point[1]]
a = table[table_x][table_y]
x = abs(a[0] - point[0])
y = abs(a[1] - point[1])
ans = x + y
return ans
def fast_distance_to_goal(looking_node,node, table):
parent_distance = looking_node.mydistance
parent_board = looking_node.board
selection = node.selection
exchange = node.exchange
child_board = node.board
exchange_distance = point_md(selection,parent_board, table) - point_md(exchange ,child_board, table)
selection_distance = point_md(exchange ,parent_board, table) - point_md(selection,child_board, table)
child_distance = parent_distance - (exchange_distance + selection_distance)
node.mydistance = child_distance
return child_distance * EXCHANGE_RATE
def tuplenode (node) : # Returns the node converted into tuple form
return (tuple([tuple(a) for a in node.board]) , node.selection)
def caliculate_cost (operations): # Returns the cost of the current operations
pair = operations
cost = 0
lst = []
while pair != ():
if pair[0][0] == "S":
cost += SELECTON_RATE
else:
cost += EXCHANGE_RATE
pair = pair[1]
return cost
def count_missmatch_image(board1, board2): # Returns the number of mismatched images between board1 and board2
counts = 0
for i in range(len(board1)):
for j in range(len(board1[0])):
try:
if board1[i][j] != board2[i][j]:
counts += 1
except:
print "----"
print board1
print board2
sys.exit()
return counts
def count_selection(operations): # Counts the selection operations
count = 0
for op in operations:
if op[0] == "S":
count += 1
return count
def encode_answer_format(operations_list,L_answer_text):
selectcount = 1
changecount = 0
ans = ""
word = ""
for i in range(len(operations_list)):
if((operations_list[i] == "L")or(operations_list[i] == "R")or(operations_list[i] == "U")or(operations_list[i] == "D")):
word += operations_list[i]
changecount +=1
else:
ans = "\r\n" + word[::-1] + ans
ans = "\r\n" + str(changecount) +ans
ans = "\r\n" + operations_list[i][1:] + ans
word = ""
changecount = 0
selectcount += 1
ans = str(selectcount) + "\r\n" +L_answer_text+ ans
return ans
# Execute the moves in order from the head of the list
def move_position(move_list, pos):
pos = list(pos)
for move in move_list:
if move == "L":
pos[0] -= 1
elif move == "R":
pos[0] += 1
elif move == "U":
pos[1] -= 1
elif move == "D":
pos[1] += 1
return tuple(pos)
def reverse_operations(operations):
reverse_table = {
"L": "R",
"R": "L",
"U": "D",
"D": "U"
}
result = []
moves = []
for op in operations:
if op[0] == "S":
pos = (int(op[1], 16), int(op[2], 16))
rev_moves = [reverse_table[a] for a in moves]
new_pos = move_position(reversed(moves), pos)
new_op = "S%X%X" % new_pos
result.append(new_op)
result += rev_moves
moves = []
else:
moves.append(op)
rev_moves = [reverse_table[a] for a in moves]
result += rev_moves
return result
def astar_step(queue, checked_nodes, table, min_distance, tag, fwd_ahead, back_ahead):
    dummy, looking_node, operations, selection_count = queue.pop() # Pop the head of the queue
g_star = caliculate_cost(operations)
    checked_nodes[(tuplenode(looking_node),tag)] = operations # Add to the checked_nodes set as already processed
    next_nodes = looking_node.get_next_nodes() # Collect the nodes adjacent to looking_node (up/down/left/right) into the next_nodes dict
    for key, node in next_nodes.items() : # Go through every entry
cost = 0
select = False
if key[0] != looking_node.selection :
select = True
cost += SELECTON_RATE
added_operation = (key[1],("S%X%X"%key[0],operations))
|
daniellawrence/pdfclassification
|
main.py
|
Python
|
mit
| 2,887
| 0.002425
|
#!/usr/bin/env python
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from cStringIO import StringIO
from collections import defaultdict
import sys
import yaml
raw_classifications = open('classifications.yaml').read()
doctypes = yaml.load(raw_classifications)
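# Hedged illustration (the doctype names and strings are hypothetical):
# classifications.yaml is expected to map each document type to the strings
# that identify it, e.g.
#
#   invoice:
#     - "Tax Invoice"
#     - "Amount Due"
#   bank_statement:
#     - "Opening Balance"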
def movefile(path, destination):
print "Moving file %s to %s" % (path, destination)
def convert_pdf_to_txt(path):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = file(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
interpreter.process_page(page)
fp.close()
device.close()
str = retstr.getvalue()
retstr.close()
return str
def make_classification(text):
maybe_docs = defaultdict(int)
for doctypes_name, docstrings in doctypes.items():
for string in docstrings:
if string in text:
maybe_docs[doctypes_name] += text.count(string) * 10
continue
if string.lower() in text.lower():
maybe_docs[doctypes_name] += text.count(string) * 5
continue
if not maybe_docs:
classification = 'unknown'
classification_score = -99
return classification, classification_score
    # Pick the doctype with the highest score (sorting by key alone would pick
    # the alphabetically first doctype regardless of its score).
    classification, classification_score = sorted(maybe_docs.iteritems(), key=lambda item: item[1], reverse=True)[0]
if classification_score < 50:
classification = 'unsure'
classification_score = -1
return classification, classification_score
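# Hedged worked example (illustration only; the doctype and strings are
# hypothetical): with a doctype 'invoice' matching on "Amount Due", a text that
# contains that phrase twice scores 2 * 10 = 20, which is below the 50
# threshold, so make_classification returns ('unsure', -1).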
def findbarcode(pdf):
import os
os.popen("rm /tmp/x*.
|
png").read()
os.popen("convert -density 300 %s /tmp/x.png" % pdf).read()
|
barcode = os.popen("zbarimg -q /tmp/x*.png").read().strip()
if barcode:
print "%s has a barcode of %s" % (pdf, barcode)
def main():
import os
pdffiles = []
if len(sys.argv) == 1:
for root, dirnames, filenames in os.walk("/home/dannyla"):
for filename in filenames:
if filename.lower().endswith('pdf'):
pdffiles.append(os.path.join(root, filename))
else:
pdffiles = sys.argv[1:]
for pdf in pdffiles:
pdf_strings = convert_pdf_to_txt(pdf)
classification, classification_score = make_classification(pdf_strings)
print "%s is a %s document (score:%d)" % (pdf, classification, classification_score)
findbarcode(pdf)
movefile(pdf, classification)
if __name__ == '__main__':
main()
|
eestay/edx-platform
|
lms/djangoapps/courseware/tests/test_middleware.py
|
Python
|
agpl-3.0
| 1,873
| 0
|
"""
Tests for courseware middleware
"""
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.http import Http404
from mock import patch
import courseware.courses as courses
from courseware.middleware import RedirectUnenrolledMiddleware
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class CoursewareMiddlewareTestCase(ModuleStoreTestCase):
"""Tests that courseware mi
|
ddleware is correctly redirected"""
def setUp(self):
super(CoursewareMiddlewareTestCase, self).setUp()
self.course = CourseFactory.create()
def check_user_not_enrolled_redirect(self):
"""A UserNotEnrolled exception should trigger a redirect"""
request = RequestFactory().get("dummy_url")
response = RedirectUnenrolledMiddleware().process_exception(
request, courses.UserNotEnrolled(self.course.id)
)
self.assertEqual(response.status_code, 302)
# make sure we redirect to the course about page
expected_url = reverse(
"about_course", args=[self.course.id.to_deprecated_string()]
)
target_url = response._headers['location'][1]
self.assertTrue(target_url.endswith(expected_url))
def test_user_not_enrolled_redirect(self):
self.check_user_not_enrolled_redirect()
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_MKTG_SITE": True})
def test_user_not_enrolled_redirect_mktg(self):
self.check_user_not_enrolled_redirect()
def test_process_404(self):
"""A 404 should not trigger anything"""
request = RequestFactory().get("dummy_url")
response = RedirectUnenrolledMiddleware().process_exception(
request, Http404()
)
self.assertIsNone(response)
|
csb-toolbox/CSB
|
csb/bio/io/mrc.py
|
Python
|
mit
| 9,732
| 0.009453
|
"""
Cryo-EM density map I/O
@warning: dragons ahead, this module is experimental
"""
import numpy
import struct
class DensityMapFormatError(ValueError):
pass
class ByteOrder(object):
NATIVE = '='
LITTLE = '<'
BIG = '>'
class DensityInfo(object):
def __init__(self, data, spacing, origin, shape=None, header=None, axes=None):
self.data = data
self.spacing = spacing
self.origin = origin
self.header = header
self.shape = shape
self.axes = axes
if shape is None and data is not None:
self.shape = self.data.shape
class HeaderInfo(object):
def __init__(self, fields):
fields = tuple(fields)
if not len(fields) == 25:
raise ValueError(fields)
self._fields = fields
def __getitem__(self, i):
return self._fields[i]
def __iter__(self):
return iter(self._fields)
@property
def nc(self):
return self._fields[0]
@property
def nr(self):
return self._fields[1]
@property
def ns(self):
return self._fields[2]
@property
def mode(self):
return self._fields[3]
@property
def ncstart(self):
return self._fields[4]
@property
def nrstart(self):
return self._fields[5]
@property
def nsstart(self):
return self._fields[6]
@property
def nx(self):
return self._fields[7]
@property
def ny(self):
return self._fields[8]
@property
def nz(self):
return self._fields[9]
@property
def x(self):
return self._fields[10]
@property
def y(self):
return self._fields[11]
@property
def z(self):
return self._fields[12]
@property
def alpha(self):
return self._fields[13]
@property
def beta(self):
return self._fields[14]
@property
def gamma(self):
return self._fields[15]
@property
def mapc(self):
return self._fields[16]
@property
def mapr(self):
return self._fields[17]
@property
def maps(self):
return self._fields[18]
@property
def amin(self):
return self._fields[19]
@property
def amax(self):
return self._fields[20]
@property
def amean(self):
return self._fields[21]
@property
def ispg(self):
return self._fields[22]
@property
def nsymbt(self):
return self._fields[23]
@property
def lskflg(self):
return self._fields[24]
class DensityMapReader(object):
"""
Binary MRC density map reader.
@param filename: input MRC file name
@type filename: str
"""
HEADER_SIZE = 1024
def __init__(self, filename):
self._filename = filename
@property
def filename(self):
"""
Input MRC file name
@rtype: str
"""
return self._filename
def _rawheader(self, stream):
"""
Read and return the raw binary header.
"""
raw = stream.read(DensityMapReader.HEADER_SIZE)
return bytes(raw)
def _inspect(self, rawheader, order):
"""
        Parse a raw binary header.
"""
format = '{0}10l6f3l3f3l'.format(order)
fields = struct.unpack(format, rawheader[:4 * 25])
return HeaderInfo(fields)
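    # Hedged note (added for clarity, not in the original source): the struct
    # format '10l6f3l3f3l' unpacks the first 100 bytes into the 25 fields that
    # HeaderInfo exposes, in order:
    #   nc, nr, ns, mode, ncstart, nrstart, nsstart, nx, ny, nz   (10 longs)
    #   x, y, z, alpha, beta, gamma                                (6 floats)
    #   mapc, mapr, maps                                           (3 longs)
    #   amin, amax, amean                                          (3 floats)
    #   ispg, nsymbt, lskflg                                       (3 longs)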
def _spacing(self, header):
if header.nx != 0 and header.ny != 0 and header.nz != 0:
return (header.x / header.nx, header.y / header.ny, header.z / header.nz)
else:
return (0, 0, 0)
def _origin(self, header, spacing=None):
if spacing is None:
spacing = self._spacing(header)
origin = header.ncstart, header.nrstart, header.nsstart
return [origin[i] * spacing[i] for i in range(3)]
def _shape(self, header):
return (header.ns, header.nr, header.nc)
def inspect_header(self, order=ByteOrder.NATIVE):
"""
Parse the raw binary header of the density map.
@param order: byte order (defaults to L{ByteOrder.NATIVE})
@type order: str
@return: header information
@rtype: L{HeaderInfo}
"""
with open(self.filename, 'rb') as stream:
raw = self._rawheader(stream)
return self._inspect(raw, order)
def read_header(self):
"""
Read the header of the density map only.
@return: density info without any actual data (density.data is None)
@rtype: L{DensityInfo}
"""
with open(self.filename, 'rb') as stream:
raw = self._rawheader(stream)
header = self._inspect(raw, ByteOrder.NATIVE)
spacing = self._spacing(header)
origin = self._origin(header, spacing)
shape = self._shape(header)
return DensityInfo(None, spacing, origin, shape=shape, header=raw)
def read(self):
"""
Read the entire density map.
@return: complete density info
@rtype: L{DensityInfo}
"""
with open(self.filename, 'rb') as stream:
raw = self._rawheader(stream)
header = self._inspect(raw, ByteOrder.NATIVE)
if header.mode == 2 or header.mode == 1:
byte_order = ByteOrder.NATIVE
elif header.mode == 33554432:
header = self._inspect(raw, ByteOrder.BIG)
byte_order = ByteOrder.BIG
if header.mode == 33554432:
header = self._inspect(raw, ByteOrder.LITTLE)
byte_order = ByteOrder.LITTLE
else:
raise DensityMapFormatError("Not a mode 2 CCP4 map file")
stream.read(header.nsymbt) # symmetry_data
count = header.ns * header.nr * header.nc
map_data = stream.read(4 * count)
if byte_order == ByteOrder.NATIVE:
array = numpy.fromstring(map_data, numpy.float32, count)
else:
array = numpy.zeros((count,), numpy.float32)
index = 0
while len(map_data) >= 4 * 10000:
values = struct.unpack(byte_order + '10000f', map_data[:4 * 10000])
array[index:index + 10000] = numpy.array(values, numpy.float32)
index += 10000
map_data = map_data[4 * 10000:]
values = struct.unpack(byte_order + '%df' % (len(map_data) / 4), map_data)
array[index:] = numpy.array(values, numpy.float32)
del map_data
array.shape = self._shape(header)
data = array.T
spacing = self._spacing(header)
origin = self._origin(header, spacing)
return DensityInfo(data, spacing, origin, header=raw)
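    # Hedged usage sketch (illustration only; 'map.mrc' is a hypothetical file):
    #
    #   reader = DensityMapReader('map.mrc')
    #   info = reader.read_header()    # header only, info.data is None
    #   density = reader.read()        # full map, density.data is a numpy array
    #   print(density.shape, density.spacing, density.origin)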
class DensityMapWriter(object):
"""
Binary MRC density map writer.
"""
def reconstruct_header(self, density):
"""
Attempt to reconstruct the header, given L{DensityInfo}'s
data shape, spacing and origin.
@param density: density info
@type density: L{DensityInfo}
@return: reconstructed binary header
@rtype: bytes
"""
N = list(density.data.shape)
MODE = 2
if isinstance(density.spacing, float):
spacing = 3 * [density.spacing]
else:
spacing = density.spacing
if density.origin is None:
origin = 3 * [0.]
else:
origin = density.origin
if density.axes is None:
MAP = list(range(1, 4))
else:
MAP = list(density.axes)
start = [int(round(origin[i] / spacing[i], 0)) for i in range(3)]
M = [density.data.sh
|
gully/PyKE
|
pyke/__init__.py
|
Python
|
mit
| 1,296
| 0.036265
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
import matplotlib
matplotlib.use('TkAgg')
from .version import __version__
from .keparray import *
from .kepbls import *
from .kepclean import *
from .kepclip import *
from .kepconvert import *
from .kepcotrend import *
from .kepdetrend import *
from .kepdiffim import *
from .kepdraw import *
from .kepdynamic import *
from .kepextract import *
from .kepfilter import *
from .kepfit import *
from .kepflatten import *
from .kepfold import *
from .kepfourier import *
from .kepperiodogram import *
from .kepfunc import *
from .kephead import *
from .kepimages import *
from .kepio import *
from .kepkey import *
from .kepmask import *
from .kepmsg import *
from .kepoutlier import *
from .keppca import *
from .keppixseries import *
from .kepplot import *
from .kepprf import *
from .kepprfphot import *
from .keprange import *
from .kepsff import *
from .kepsmooth import *
from .kepstat import *
from .kepstddev import *
from .kepstitch import *
from .keptimefix import *
from .keptrial import *
from .keptrim import *
from .kepwindow import *
from .prf import *
from .lightcurve import *
from .targetpixelfile import *
from .utils import *
|
amol-mandhane/konnactivity
|
settings/__init__.py
|
Python
|
mit
| 71
| 0.014085
|
from .base import *
from .local import *
FB_APP_ID = "557603244304943"
|
restless/django-guardian
|
guardian/admin.py
|
Python
|
bsd-2-clause
| 14,636
| 0.001366
|
from django import forms
from django.conf import settings
from guardian.compat import url, patterns
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext, ugettext_lazy as _
from guardian.forms import UserObjectPermissionsForm
from guardian.forms import GroupObjectPermissionsForm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_perms_for_model
from guardian.models import User, Group
class AdminUserObjectPermissionsForm(UserObjectPermissionsForm):
"""
    Extends :form:`UserObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns the
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class AdminGroupObjectPermissionsForm(GroupObjectPermissionsForm):
"""
    Extends :form:`GroupObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns the
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
"""
def get_obj_perms_field_widget(self):
return FilteredSelectMultiple(_("Permissions"), False)
class GuardedModelAdmin(admin.ModelAdmin):
"""
Extends ``django.contrib.admin.ModelAdmin`` class. Provides some extra
views for object permissions management at admin panel. It also changes
default ``change_form_template`` option to
``'admin/guardian/model/change_form.html'`` which is required for proper
url (object permissions related) being shown at the model pages.
**Extra options**
``GuardedModelAdmin.obj_perms_manage_template``
*Default*: ``admin/guardian/model/obj_perms_manage.html``
``GuardedModelAdmin.obj_perms_manage_user_template``
*Default*: ``admin/guardian/model/obj_perms_manage_user.html``
``GuardedModelAdmin.obj_perms_manage_group_template``
*Default*: ``admin/guardian/model/obj_perms_manage_group.html``
``GuardedModelAdmin.user_can_access_owned_objects_only``
*Default*: ``False``
        If set to ``True``, ``request.user`` is used to filter out objects the
        user does not own (checked against the ``user`` field of the model;
        the field name may be overridden by the ``user_owned_objects_field``
        option).
.. note::
Please remember that this will **NOT** affect superusers!
Admins would still see all items.
``GuardedModelAdmin.user_owned_objects_field``
*Default*: ``user``
**Usage example**
Just use :admin:`GuardedModelAdmin` instead of
``django.contrib.admin.ModelAdmin``.
.. code-block:: python
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from myapp.models import Author
class AuthorAdmin(GuardedModelAdmin):
pass
admin.site.register(Author, AuthorAdmin)
"""
change_form_template = \
'admin/guardian/model/change_form.html'
obj_perms_manage_template = \
'admin/guardian/model/obj_perms_manage.html'
obj_perms_manage_user_template = \
'admin/guardian/model/obj_perms_manage_user.html'
obj_perms_manage_group_template = \
'admin/guardian/model/obj_perms_manage_group.html'
user_can_access_owned_objects_only = False
user_owned_objects_field = 'user'
def queryset(self, request):
qs = super(GuardedModelAdmin, self).queryset(request)
if self.user_can_access_owned_objects_only and \
not request.user.is_superuser:
filters = {self.user_owned_objects_field: request.user}
qs = qs.filter(**filters)
return qs
def get_urls(self):
"""
Extends standard admin model urls with the following:
- ``.../permissions/``
- ``.../permissions/user-manage/<user_id>/``
- ``.../permissions/group-manage/<group_id>/``
.. note::
``...`` above are standard, instance detail url (i.e.
``/admin/flatpages/1/``)
"""
urls = super(GuardedModelAdmin, self).get_urls()
info = self.model._meta.app_label, self.model._meta.module_name
myurls = patterns('',
url(r'^(?P<object_pk>.+)/permissions/$',
view=self.admin_site.admin_view(self.obj_perms_manage_view),
name='%s_%s_permissions' % info),
url(r'^(?P<object_pk>.+)/permissions/user-manage/(?P<user_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_user_view),
name='%s_%s_permissions_manage_user' % info),
url(r'^(?P<object_pk>.+)/permissions/group-manage/(?P<group_id>\-?\d+)/$',
view=self.admin_site.admin_view(
self.obj_perms_manage_group_view),
name='%s_%s_permissions_manage_group' % info),
        )
        return myurls + urls
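    # Hedged illustration (not part of the original class; 'myapp.Author' is a
    # hypothetical model): for that model the extra URL names produced above are
    # 'myapp_author_permissions', 'myapp_author_permissions_manage_user' and
    # 'myapp_author_permissions_manage_group', reversible for example as
    #
    #   reverse('admin:myapp_author_permissions', args=[obj.pk])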
def get_obj_perms_base_context(self, request, obj):
"""
Returns context dictionary with common admin and object permissions
related content.
"""
context = {
'adminform': {'model_admin': self},
'object': obj,
'app_label': self.model._meta.app_label,
'opts': self.model._meta,
'original': hasattr(obj, '__unicode__') and obj.__unicode__() or\
str(obj),
'has_change_permission': self.has_change_permission(request, obj),
'model_perms': get_perms_for_model(obj),
'title': _("Object permissions"),
}
return context
def obj_perms_manage_view(self, request, object_pk):
"""
        Main object permissions view. Presents all users and groups that have
        any object permissions for the current model *instance*. Users or
        groups without object permissions for the *instance* are **not**
        shown. To add or manage a user or group, use the links and forms
        presented on the page.
"""
obj = get_object_or_404(self.queryset(request), pk=object_pk)
users_perms = SortedDict(
get_users_with_perms(obj, attach_perms=True,
with_group_users=False))
users_perms.keyOrder.sort(key=lambda user: user.username)
groups_perms = SortedDict(
get_groups_with_perms(obj, attach_perms=True))
groups_perms.keyOrder.sort(key=lambda group: group.name)
if request.method == 'POST' and 'submit_manage_user' in request.POST:
user_form = UserManage(request.POST)
group_form = GroupManage()
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
if user_form.is_valid():
user_id = user_form.cleaned_data['user'].id
url = reverse(
'%s:%s_%s_permissions_manage_user' % info,
args=[obj.pk, user_id]
)
return redirect(url)
elif request.method == 'POST' and 'submit_manage_group' in request.POST:
user_form = UserManage()
group_form = GroupManage(request.POST)
info = (
self.admin_site.name,
self.model._meta.app_label,
self.model._meta.module_name
)
if group_form.is_valid():
group_id = group_form.cleaned_data['group'].id
url = reverse(
'%s:%s_%s_permissions_manage_group' % info,
args=[obj.pk, group_id]
)
return redirect(url)
|
cobbler/koan
|
koan/configurator.py
|
Python
|
gpl-2.0
| 10,535
| 0.00019
|
"""
Configuration class.
Copyright 2010 Kelsey Hightower
Kelsey Hightower <kelsey.hightower@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
module for configuring repos, packages and files
"""
import filecmp
import shutil
from . import utils
import tempfile
import stat
import os.path
import sys
import time
import pwd
import grp
import json
try:
import yum
sys.path.append('/usr/share/yum-cli')
import cli
    yum_available = True
except:
yum_available = False
class KoanConfigure:
"""
Used for all configuration methods, used by koan
to configure repos, files and packages.
"""
def __init__(self, config):
"""Constructor. Requires json config object."""
self.config = json.JSONDecoder().decode(config)
self.stats = {}
(self.dist, _) = utils.os_release()
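        # Hedged illustration (the keys are inferred from their use below, the
        # values are hypothetical): the decoded config is expected to look like
        #
        #   {
        #     "repo_data": "[config]\nname=config\nbaseurl=http://example.com/repo\n",
        #     "packages": {"vim-enhanced": {"action": "create",
        #                                   "install_name": "vim-enhanced-7.4"}},
        #     "files": {"motd": {"is_dir": False, "action": "create",
        #                        "path": "/etc/motd", "mode": "0644",
        #                        "owner": "root", "group": "root"}}
        #   }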
def configure_repos(self):
# Enables the possibility to use different types of repos
if yum_available and self.dist == "redhat":
self.configure_yum_repos()
def configure_yum_repos(self):
"""Configure YUM repositories."""
print("- Configuring Repos")
old_repo = '/etc/yum.repos.d/config.repo'
# Stage a tempfile to hold new file contents
_tempfile = tempfile.NamedTemporaryFile()
_tempfile.write(self.config['repo_data'])
_tempfile.flush()
new_repo = _tempfile.name
# Check if repo resource exist, create if missing
if os.path.isfile(old_repo):
if not filecmp.cmp(old_repo, new_repo):
utils.sync_file(old_repo, new_repo, 0, 0, 644)
self.stats['repos_status'] = "Success: Repos in sync"
else:
self.stats['repos_status'] = "Success: Repos in sync"
else:
print(" %s not found, creating..." % old_repo)
open(old_repo, 'w').close()
utils.sync_file(old_repo, new_repo, 0, 0, 644)
self.stats['repos_status'] = "Success: Repos in sync"
_tempfile.close()
def configure_packages(self):
# Enables the possibility to use different types of package
# configurators
if yum_available and self.dist == "redhat":
self.configure_yum_packages()
def configure_yum_packages(self):
"""Configure package resources."""
print("- Configuring Packages")
runtime_start = time.time()
nsync = 0
osync = 0
fail = 0
packages = self.config['packages']
yb = yum.YumBase()
yb.preconf.debuglevel = 0
yb.preconf.errorlevel = 0
yb.doTsSetup()
yb.doRpmDBSetup()
ybc = cli.YumBaseCli()
ybc.preconf.debuglevel = 0
ybc.preconf.errorlevel = 0
ybc.conf.assumeyes = True
ybc.doTsSetup()
ybc.doRpmDBSetup()
create_pkg_list = []
remove_pkg_list = []
for package in packages:
action = packages[package]['action']
# In the near future, will use install_name vs package
# as it includes a more specific package name: "package-version"
# install_name = packages[package]['install_name']
if yb.isPackageInstalled(package):
if action == 'create':
nsync += 1
if action == 'remove':
remove_pkg_list.append(package)
if not yb.isPackageInstalled(package):
if action == 'create':
create_pkg_list.append(package)
if action == 'remove':
nsync += 1
# Don't waste time with YUM if there is nothing to do.
doTransaction = False
if create_pkg_list:
print(" Packages out of sync: %s" % create_pkg_list)
ybc.installPkgs(create_pkg_list)
osync += len(create_pkg_list)
doTransaction = True
if remove_pkg_list:
print(" Packages out of sync: %s" % remove_pkg_list)
ybc.erasePkgs(remove_pkg_list)
osync += len(remove_pkg_list)
doTransaction = True
if doTransaction:
ybc.buildTransaction()
ybc.doTransaction()
runtime_end = time.time()
runtime = (runtime_end - runtime_start)
self.stats['pkg'] = {
'runtime': runtime,
'nsync': nsync,
'osync': osync,
'fail': fail}
def configure_directories(self):
""" Configure directory resources."""
print("- Configuring Directories")
runtime_start = time.time()
nsync = 0
osync = 0
fail = 0
files = self.config['files']
# Split out directories
_dirs = [d for d in files if files[d]['is_dir']]
# Configure directories first
for dir in _dirs:
action = files[dir]['action']
odir = files[dir]['path']
protected_dirs = [
'/',
'/bin',
'/boot',
'/dev',
'/etc',
'/lib',
'/lib64',
'/proc',
'/sbin',
'/sys',
'/usr',
'/var']
if os.path.isdir(odir):
if os.path.realpath(odir) in protected_dirs:
print(" %s is a protected directory, skipping..."
% os.path.realpath(odir))
fail += 1
continue
if action == 'create':
nmode = int(files[dir]['mode'], 8)
nuid = pwd.getpwnam(files[dir]['owner'])[2]
ngid = grp.getgrnam(files[dir]['group'])[2]
# Compare old and new directories, sync if permissions mismatch
if os.path.isdir(odir):
dstat = os.stat(odir)
omode = stat.S_IMODE(dstat.st_mode)
ouid = pwd.getpwuid(dstat.st_uid)[2]
ogid = grp.getgrgid(dstat.st_gid)[2]
if omode != nmode or ouid != nuid or ogid != ngid:
os.chmod(odir, nmode)
os.chown(odir, nuid, ngid)
osync += 1
else:
nsync += 1
else:
print(" Directory out of sync, creating %s" % odir)
os.makedirs(odir, nmode)
os.chown(odir, nuid, ngid)
osync += 1
elif action == 'remove':
if os.path.isdir(odir):
print(" Directory out of sync, removing %s" % odir)
shutil.rmtree(odir)
osync += 1
else:
nsync += 1
else:
pass
runtime_end = time.time()
runtime = (runtime_end - runtime_start)
self.stats['dir'] = {
'runtime': runtime,
'nsync': nsync,
'osync': osync,
'fail': fail}
def configure_files(self):
""" Configure file resources."""
print("- Configuring Files")
runtime_start = time.time()
nsync = 0
osync = 0
fail = 0
files = self.config['files']
# Split out files
_files = [f for f in files if files[f]['is_dir'] is False]
for file in _files:
action = files[file]['action']
            ofile = files[file]['path']
|
LearnEra/LearnEraPlaftform
|
lms/djangoapps/django_comment_client/utils.py
|
Python
|
agpl-3.0
| 18,621
| 0.003007
|
import pytz
from collections import defaultdict
import logging
from datetime import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import connection
from django.http import HttpResponse
from django.utils import simplejson
from django_comment_common.models import Role, FORUM_ROLE_STUDENT
from django_comment_client.permissions import check_permissions_by_view, cached_has_permission
from edxmako import lookup_template
import pystache_custom as pystache
from course_groups.cohorts import get_cohort_by_id, get_cohort_id, is_commentable_cohorted
from course_groups.models import CourseUserGroup
from xmodule.modulestore.django import modulestore
from django.utils.timezone import UTC
from opaque_keys.edx.locations import i4xEncoder
from opaque_keys.edx.keys import CourseKey
import json
log = logging.getLogger(__name__)
def extract(dic, keys):
return {k: dic.get(k) for k in keys}
def strip_none(dic):
return dict([(k, v) for k, v in dic.iteritems() if v is not None])
def strip_blank(dic):
def _is_blank(v):
return isinstance(v, str) and len(v.strip()) == 0
    return dict([(k, v) for k, v in dic.iteritems() if not _is_blank(v)])
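# Hedged examples (illustration only) for the small helpers above:
#
#   extract({'a': 1, 'b': 2}, ['a', 'c'])   # -> {'a': 1, 'c': None}
#   strip_none({'a': 1, 'b': None})         # -> {'a': 1}
#   strip_blank({'a': '  ', 'b': 'x'})      # -> {'b': 'x'}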
# TODO should we be checking if d1 and d2 have the same keys with different values?
def merge_dict(dic1, dic2):
return dict(dic1.items() + dic2.items())
def get_role_ids(course_id):
roles = Role.objects.filter(course_id=course_id).exclude(name=FORUM_ROLE_STUDENT)
return dict([(role.name, list(role.users.values_list('id', flat=True))) for role in roles])
def has_forum_access(uname, course_id, rolename):
try:
role = Role.objects.get(name=rolename, course_id=course_id)
except Role.DoesNotExist:
return False
return role.users.filter(username=uname).exists()
def _get_discussion_modules(course):
all_modules = modulestore().get_items(course.id, qualifiers={'category': 'discussion'})
def has_required_keys(module):
for key in ('discussion_id', 'discussion_category', 'discussion_target'):
if getattr(module, key) is None:
log.warning("Required key '%s' not in discussion %s, leaving out of category map" % (key, module.location))
return False
return True
return filter(has_required_keys, all_modules)
def _get_discussion_id_map(course):
def get_entry(module):
discussion_id = module.discussion_id
title = module.discussion_target
last_category = module.discussion_category.split("/")[-1].strip()
return (discussion_id, {"location": module.location, "title": last_category + " / " + title})
return dict(map(get_entry, _get_discussion_modules(course)))
def _filter_unstarted_categories(category_map):
now = datetime.now(UTC())
result_map = {}
unfiltered_queue = [category_map]
filtered_queue = [result_map]
while len(unfiltered_queue) > 0:
unfiltered_map = unfiltered_queue.pop()
filtered_map = filtered_queue.pop()
filtered_map["children"] = []
filtered_map["entries"] = {}
filtered_map["subcategories"] = {}
for child in unfiltered_map["children"]:
if child in unfiltered_map["entries"]:
if unfiltered_map["entries"][child]["start_date"] <= now:
filtered_map["children"].append(child)
filtered_map["entries"][child] = {}
for key in unfiltered_map["entries"][child]:
if key != "start_date":
filtered_map["entries"][child][key] = unfiltered_map["entries"][child][key]
else:
log.debug(u"Filtering out:%s with start_date: %s", child, unfiltered_map["entries"][child]["start_date"])
else:
if unfiltered_map["subcategories"][child]["start_date"] < now:
filtered_map["children"].append(child)
filtered_map["subcategories"][child] = {}
unfiltered_queue.append(unfiltered_map["subcategories"][child])
filtered_queue.append(filtered_map["subcategories"][child])
return result_map
def _sort_map_entries(category_map, sort_alpha):
things = []
for title, entry in category_map["entries"].items():
if entry["sort_key"] == None and sort_alpha:
entry["sort_key"] = title
things.append((title, entry))
for title, category in category_map["subcategories"].items():
things.append((title, category))
_sort_map_entries(category_map["subcategories"][title], sort_alpha)
category_map["children"] = [x[0] for x in sorted(things, key=lambda x: x[1]["sort_key"])]
def get_discussion_category_map(course):
course_id = course.id
unexpanded_category_map = defaultdict(list)
modules = _get_discussion_modules(course)
is_course_cohorted = course.is_cohorted
cohorted_discussion_ids = course.cohorted_discussions
for module in modules:
id = module.discussion_id
title = module.discussion_target
sort_key = module.sort_key
category = " / ".join([x.strip() for x in module.discussion_category.split("/")])
#Handle case where module.start is None
entry_start_date = module.start if module.start else datetime.max.replace(tzinfo=pytz.UTC)
unexpanded_category_map[category].append({"title": title, "id": id, "sort_key": sort_key, "start_date": entry_start_date})
category_map = {"entries": defaultdict(dict), "subcategories": defaultdict(dict)}
for category_path, entries in unexpanded_category_map.items():
node = category_map["subcategories"]
path = [x.strip() for x in category_path.split("/")]
# Find the earliest start date for the entries in this category
category_start_date = None
for entry in entries:
if category_start_date is None or entry["start_date"] < category_start_date:
category_start_date = entry["start_date"]
for level in path[:-1]:
if level not in node:
node[level] = {"subcategories": defaultdict(dict),
"entries": defaultdict(dict),
"sort_key": level,
"start_date": category_start_date}
else:
if node[level]["start_date"] > category_start_date:
node[level]["start_date"] = category_start_date
node = node[level]["subcategories"]
level = path[-1]
if level not in node:
node[level] = {"subcategories": defaultdict(dict),
"entries": defaultdict(dict),
"sort_key": level,
"start_date": category_start_date}
else:
if node[level]["start_date"] > category_start_date:
node[level]["start_date"] = category_start_date
for entry in entries:
node[level]["entries"][entry["title"]] = {"id": entry["id"],
"sort_key": entry["sort_key"],
"start_date": entry["start_date"],
"is_cohorted": is_course_cohorted}
# TODO. BUG! : course location is not unique across multiple course runs!
# (I think Kevin already noticed this) Need to send course_id with requests, store it
# in the backend.
for topic, entry in course.discussion_topics.items():
category_map['entries'][topic] = {"id": entry["id"],
"sort_key": entry.get("sort_key", topic),
"start_date": datetime.now(UTC()),
"is_cohorted": is_course_cohorted and entry["id"] in cohorted_discussion_ids}
_sort_map_entries(category_map, course.discussion_sort_alpha)
return _filter_unstarted_categories(category_map)
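# Hedged illustration (shape inferred from the code above; the names are
# hypothetical): the returned category map nests subcategories and entries
# roughly like
#
#   {"children": ["Week 1"],
#    "entries": {},
#    "subcategories": {
#        "Week 1": {"children": ["Homework"],
#                   "entries": {"Homework": {"id": "...", "sort_key": None,
#                                            "is_cohorted": False}},
#                   "subcategories": {}}}}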
class JsonResponse(HttpResponse):
def __init__(self, data=None):
|