| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 value) | license (string, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
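The columns above describe a fill-in-the-middle corpus: each row pairs repository metadata with a source file split into a prefix, a short middle span, and a suffix (the rows follow below, with `|` marking cell boundaries). As a minimal sketch of what the schema implies, the snippet below reassembles one such row back into plain file text; the row dict and its values are purely illustrative, not taken from the dataset.

# Hypothetical row shaped like the schema above; values are illustrative only.
row = {
    "repo_name": "example/repo",
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 50,
    "score": 0.01,
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n\nprint(add(2, 3))\n",
}

def reassemble(row):
    """Concatenate the three spans back into the original source text."""
    return row["prefix"] + row["middle"] + row["suffix"]

print(reassemble(row))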
| roryk/recipes | recipes/weeder/weeder2.py | Python | mit | 833 | 0.0012 |
#!/usr/bin/env python
# Small wrapper script for weeder2, which needs the FreqFiles directory
|
# where it is executed. This script allows running weeder2 from anywhere.
import os
import sys
import argparse
import subprocess as sp
# Weeder install dir
weeder_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "share", "weeder2"))
|
weeder_exe = "weeder2"
weeder_help = sp.check_output(
os.path.join(weeder_dir, weeder_exe),
stderr=sp.STDOUT).decode()
parser = argparse.ArgumentParser()
parser.add_argument("-f", dest="fname")
args, unknownargs = parser.parse_known_args()
if not args.fname:
print(weeder_help)
sys.exit()
fname = os.path.abspath(args.fname)
rest = " ".join(unknownargs)
cmd = "./{} -f {} {}".format(weeder_exe, fname, rest)
sys.exit(sp.call(cmd, shell=True, cwd=weeder_dir))
|
| ader1990/cloudbase-init | cloudbaseinit/metadata/services/azureservice.py | Python | apache-2.0 | 18,702 | 0 |
# Copyright 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import socket
import time
from xml.etree import ElementTree
from oslo_log import log as oslo_logging
import six
import untangle
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit import constant
from cloudbaseinit import exception
from cloudbaseinit.metadata.services import base
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.utils import dhcp
from cloudbaseinit.utils import encoding
from cloudbaseinit.utils.windows import x509
CONF = cloudbaseinit_conf.CONF
LOG = oslo_logging.getLogger(__name__)
WIRESERVER_DHCP_OPTION = 245
WIRE_SERVER_VERSION = '2015-04-05'
GOAL_STATE_STARTED = "Started"
HEALTH_STATE_READY = "Ready"
HEALTH_STATE_NOT_READY = "NotReady"
HEALTH_SUBSTATE_PROVISIONING = "Provisioning"
HEALTH_SUBSTATE_PROVISIONING_FAILED = "ProvisioningFailed"
ROLE_PROPERTY_CERT_THUMB = "CertificateThumbprint"
OVF_ENV_DRIVE_TAG = "E6DA6616-8EC4-48E0-BE93-58CE6ACE3CFB.tag"
OVF_ENV_FILENAME = "ovf-env.xml"
CUSTOM_DATA_FILENAME = "CustomData.bin"
DATALOSS_WARNING_PATH = '$$\\OEM\\DATALOSS_WARNING_README.txt'
DEFAULT_KMS_HOST = "kms.core.windows.net"
class AzureService(base.BaseHTTPMetadataService):
def __init__(self):
super(AzureService, self).__init__(base_url=None)
        self._enable_retry = True
|
self._goal_state = None
self._config_set_drive_path = None
self._ovf_env = None
self._headers = {"x-ms-guest-agent-name": "cloudbase-init"}
|
self._osutils = osutils_factory.get_os_utils()
def _get_wire_server_endpoint_address(self):
total_time = 300
poll_time = 5
retries = total_time / poll_time
while True:
try:
options = dhcp.get_dhcp_options()
endpoint = (options or {}).get(WIRESERVER_DHCP_OPTION)
if not endpoint:
raise exception.MetadaNotFoundException(
"Cannot find Azure WireServer endpoint address")
return socket.inet_ntoa(endpoint)
except Exception:
if not retries:
raise
time.sleep(poll_time)
retries -= 1
def _check_version_header(self):
if "x-ms-version" not in self._headers:
versions = self._get_versions()
if WIRE_SERVER_VERSION not in versions.Versions.Supported.Version:
raise exception.MetadaNotFoundException(
"Unsupported Azure WireServer version: %s" %
WIRE_SERVER_VERSION)
self._headers["x-ms-version"] = WIRE_SERVER_VERSION
def _get_versions(self):
return self._wire_server_request("?comp=Versions")
def _wire_server_request(self, path, data_xml=None, headers=None,
parse_xml=True):
if not self._base_url:
raise exception.CloudbaseInitException(
"Azure WireServer base url not set")
all_headers = self._headers.copy()
if data_xml:
all_headers["Content-Type"] = "text/xml; charset=utf-8"
if headers:
all_headers.update(headers)
data = self._exec_with_retry(
lambda: super(AzureService, self)._http_request(
path, data_xml, headers=all_headers))
if parse_xml:
return untangle.parse(six.StringIO(encoding.get_as_string(data)))
else:
return data
@staticmethod
def _encode_xml(xml_root):
bio = six.BytesIO()
ElementTree.ElementTree(xml_root).write(
bio, encoding='utf-8', xml_declaration=True)
return bio.getvalue()
def _get_health_report_xml(self, state, sub_status=None, description=None):
xml_root = ElementTree.Element('Health')
xml_goal_state_incarnation = ElementTree.SubElement(
xml_root, 'GoalStateIncarnation')
xml_goal_state_incarnation.text = str(self._get_incarnation())
xml_container = ElementTree.SubElement(xml_root, 'Container')
xml_container_id = ElementTree.SubElement(xml_container, 'ContainerId')
xml_container_id.text = self._get_container_id()
xml_role_instance_list = ElementTree.SubElement(
xml_container, 'RoleInstanceList')
xml_role = ElementTree.SubElement(xml_role_instance_list, 'Role')
xml_role_instance_id = ElementTree.SubElement(xml_role, 'InstanceId')
xml_role_instance_id.text = self._get_role_instance_id()
xml_health = ElementTree.SubElement(xml_role, 'Health')
xml_state = ElementTree.SubElement(xml_health, 'State')
xml_state.text = state
if sub_status:
xml_details = ElementTree.SubElement(xml_health, 'Details')
xml_sub_status = ElementTree.SubElement(xml_details, 'SubStatus')
xml_sub_status.text = sub_status
xml_description = ElementTree.SubElement(
xml_details, 'Description')
xml_description.text = description
return self._encode_xml(xml_root)
def _get_role_properties_xml(self, properties):
xml_root = ElementTree.Element('RoleProperties')
xml_container = ElementTree.SubElement(xml_root, 'Container')
xml_container_id = ElementTree.SubElement(xml_container, 'ContainerId')
xml_container_id.text = self._get_container_id()
xml_role_instances = ElementTree.SubElement(
xml_container, 'RoleInstances')
xml_role_instance = ElementTree.SubElement(
xml_role_instances, 'RoleInstance')
xml_role_instance_id = ElementTree.SubElement(
xml_role_instance, 'Id')
xml_role_instance_id.text = self._get_role_instance_id()
xml_role_properties = ElementTree.SubElement(
xml_role_instance, 'Properties')
for name, value in properties.items():
ElementTree.SubElement(
xml_role_properties, 'Property', name=name, value=value)
return self._encode_xml(xml_root)
def _get_goal_state(self, force_update=False):
if not self._goal_state or force_update:
self._goal_state = self._wire_server_request(
"machine?comp=goalstate").GoalState
expected_state = self._goal_state.Machine.ExpectedState
if expected_state != GOAL_STATE_STARTED:
raise exception.CloudbaseInitException(
"Invalid machine expected state: %s" % expected_state)
return self._goal_state
def _get_incarnation(self):
goal_state = self._get_goal_state()
return goal_state.Incarnation.cdata
def _get_container_id(self):
goal_state = self._get_goal_state()
return goal_state.Container.ContainerId.cdata
def _get_role_instance_config(self):
goal_state = self._get_goal_state()
role_instance = goal_state.Container.RoleInstanceList.RoleInstance
return role_instance.Configuration
def _get_role_instance_id(self):
goal_state = self._get_goal_state()
role_instance = goal_state.Container.RoleInstanceList.RoleInstance
return role_instance.InstanceId.cdata
def _post_health_status(self, state, sub_status=None, description=None):
health_report_xml = self._get_health_report_xml(
state, sub_status, description)
LOG.debug("Health data: %s", health_report_xml)
self._wire_server_request(
"machine?comp=health", health_report_xml, parse_xml=False)
def provision
|
| KarimAllah/nova | nova/tests/test_xenapi.py | Python | apache-2.0 | 69,840 | 0.00126 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import contextlib
import datetime
import functools
import json
import os
import re
import stubout
from nova import db
from nova import context
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.compute import instance_types
from nova.compute import power_state
from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
from nova.tests import fake_network
from nova.tests import fake_utils
LOG = logging.getLogger('nova.tests.test_xenapi')
FLAGS = flags.FLAGS
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_stream_disk(*args, **kwargs):
pass
def fake_is_vdi_pv(*args, **kwargs):
return should_return
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_stream_disk = vm_utils._stream_disk
orig_is_vdi_pv = vm_utils._is_vdi_pv
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
vm_utils._stream_disk = fake_stream_disk
vm_utils._is_vdi_pv = fake_is_vdi_pv
return function(self, *args, **kwargs)
finally:
vm_utils._is_vdi_pv = orig_is_vdi_pv
vm_utils._stream_disk = orig_stream_disk
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
class XenAPIVolumeTestCase(test.TestCase):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.instance_values = {'id': 1,
'project_id': self.user_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@staticmethod
def _make_info():
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260,fake',
'auth_method': 'CHAP',
                'auth_username': 'fake',
                'auth_password': 'fake',
}
}
def test_parse_volume_info_raise_exception(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
self._make_info(),
'dev/sd'
)
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(self._make_info(),
instance.name, '/dev/sdc')
def check():
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
check()
def test_attach_volume_raise_exception(self):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
volume['id'],
'/dev/sdc')
def tearDown(self):
super(XenAPIVolumeTestCase, self).tearDown()
self.stubs.UnsetAll()
def configure_instance(*args):
pass
def _find_rescue_vbd_ref(*args):
pass
class XenAPIVMTestCase(test.TestCase):
"""Unit tests for VM operations."""
def setUp(self):
        super(XenAPIVMTestCase, self).setUp()
|
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
|
'Dom0IptablesFirewallDriver')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(vmops.VMOps, '_configure_instance',
|
| DamnWidget/anaconda_go | lib/cache.py | Python | gpl-3.0 | 2,829 | 0.000707 |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import json
import platform
from collections import defaultdict
from anaconda_go.lib import go
from anaconda_go.lib.plugin import typing
cachepath = {
'linux': os.path.join('~', '.local', 'share', 'anaconda', 'cache'),
'darwin': os.path.join('~', 'Library', 'Cache', 'anaconda'),
'windows': os.path.join(os.getenv('APPDATA') or '~', 'Anaconda', 'Cache')
}
cache_directory = os.path.expanduser(
cachepath.get(platform.system().lower())
)
PACKAGES_CACHE = defaultdict(lambda: [])
def append(package: typing.Dict) -> None:
"""Append the given package into the cache
"""
global PACKAGES_CACHE
if not package_in_cache(package):
PACKAGES_CACHE[go.GOROOT].append(package)
def package_in_cache(package: typing.Dict) -> bool:
|
    """Look for the given package in the cache and return true if it is there
|
"""
for pkg in PACKAGES_CACHE[go.GOROOT]:
if pkg['ImportPath'] == package['ImportPath']:
return True
return False
def lookup(node_name: str='') -> typing.Dict:
"""Lookup the given node_name in the cache and return it back
"""
node = {}
if node_name == '':
node = PACKAGES_CACHE[go.GOROOT]
else:
for pkg in PACKAGES_CACHE[go.GOROOT]:
guru = pkg.get('Guru')
if guru is None:
continue
path = guru['package'].get('path')
if path is not None and path == node_name:
node = guru
break
for member in guru['package'].get('members', []):
if member.get('name') == node_name:
node = member
break
for method in member.get('methods', []):
if method['name'] == node_name:
node = method
break
return node
def persist_package_cache() -> None:
"""Write the contents of the package cache for this GOROOT into the disk
"""
gopath = go.GOPATH.replace(os.path.sep, '_')
cachefile = os.path.join(cache_directory, gopath, 'packages.cache')
if not os.path.exists(os.path.dirname(cachefile)):
os.makedirs(os.path.dirname(cachefile))
with open(cachefile, 'w') as fd:
json.dump(PACKAGES_CACHE[go.GOROOT], fd)
def load_package_cache() -> typing.List:
"""Load a previously stores package cache file
"""
global PACKAGES_CACHE
gopath = go.GOPATH.replace(os.path.sep, '_')
cachefile = os.path.join(cache_directory, gopath, 'packages.cache')
try:
with open(cachefile, 'r') as fd:
PACKAGES_CACHE[go.GOROOT] = json.load(fd)
except FileNotFoundError:
pass
|
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/ubuntuone-client/ubuntuone/syncdaemon/hash_queue.py | Python | gpl-3.0 | 9,720 | 0.000412 |
# ubuntuone.syncdaemon.hash_queue - hash queues
#
# Authors: Facundo Batista <facundo@canonical.com>
# Guillermo Gonzalez <guillermo.gonzalez@canonical.com>
# Alejandro J. Cura <alecu@canonical.com>
#
# Copyright 2009-2011 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module that implements the Hash Queue machinery."""
from __future__ import with_statement
import logging
import threading
import Queue
import time
from twisted.internet import reactor
from twisted.python.util import OrderedDict as TxOrderedDict
from ubuntuone.storageprotocol.content_hash import \
content_hash_factory, crc32
from ubuntuone.platform import (
open_file,
stat_path,
)
from ubuntuone.platform.constants import HASHQUEUE_DELAY
NO_TIMESTAMP = None
class FixedOrderedDict(TxOrderedDict):
"""A t.p.u.OrderedDict that behaves like Python 2.7's OrderedDict."""
def popitem(self, last=False):
"""Take out the first or last item, and return it."""
index = -1 if last else 0
key = self._order[index]
value = self[key]
del self[key]
return (key, value)
def clear(self):
"""Remove every item from the dict."""
self._order = []
TxOrderedDict.clear(self)
try:
# try to use the OrderedDict from stdlib >= 2.7
from collections import OrderedDict as StdlibOrderedDict
OrderedDict = StdlibOrderedDict
except ImportError:
# if not available, use the patched one based on twisted
OrderedDict = FixedOrderedDict
class StopHashing(Exception):
"""The current hash was cancelled."""
class _Hasher(threading.Thread):
"""Class that lives in another thread, hashing all night long."""
def __init__(self, queue, end_mark, event_queue):
self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ.hasher')
self.end_mark = end_mark
self.queue = queue
self.eq = event_queue
# mutex to access _should_cancel and _hashing attributes
self.mutex = threading.Lock()
self._should_cancel = None
self._stopped = True # start stopped
self.chunk_size = 2**16
self.hashing = None
threading.Thread.__init__(self)
def run(self):
"""Run the thread."""
self._stopped = False
while True:
if self._stopped:
break
info, timestamp = self.queue.get()
if info is self.end_mark:
self._stopped = True
self.queue.task_done()
break
path, mdid = info
with self.mutex:
self.hashing = path
m = "Hasher: got file to hash: path %r mdid %s"
self.logger.debug(m, path, mdid)
now = time.time()
delta = timestamp - now
if delta > 0:
self.logger.trace("Waiting %f before starting hash", delta)
time.sleep(delta)
try:
result = self._hash(path)
except (IOError, OSError), e:
m = "Hasher: hash error %s (path %r mdid %s)"
self.logger.debug(m, e, path, mdid)
reactor.callLater(.1, reactor.callFromThread, self.eq.push,
"HQ_HASH_ERROR", mdid=mdid)
except StopHashing, e:
self.logger.debug(str(e))
else:
hashdata, crc, size, stat = result
self.logger.debug("Hasher: path hash pushed: path=%r hash=%s"
" crc=%s size=%d st_ino=%d st_size=%d"
" st_mtime=%r", path, hashdata,crc, size,
stat.st_ino, stat.st_size, stat.st_mtime)
reactor.callFromThread(self.eq.push, "HQ_HASH_NEW", path=path,
hash=hashdata, crc32=crc,
size=size, stat=stat)
finally:
with self.mutex:
self.hashing = None
self.queue.task_done()
|
def stop(self):
"""Stop the hasher.
Will be effective in the next loop if a hash is in progress.
"""
        # clear the queue to push an end_mark, just to unblock if we are waiting
# for a new item
self.queue.clear()
        # set the end_mark in case we are waiting for a path
item = (self.end_mark, NO_TIMESTAMP)
self.queue.put(item)
        self._stopped = True
|
def _hash(self, path):
"""Actually hashes a file."""
hasher = content_hash_factory()
crc = 0
size = 0
try:
initial_stat = stat_path(path)
with open_file(path, 'rb') as fh:
while True:
# stop hashing if path_to_cancel == path or _stopped is True
with self.mutex:
path_to_cancel = self._should_cancel
if path_to_cancel == path or self._stopped:
raise StopHashing('hashing of %r was cancelled' % path)
cont = fh.read(self.chunk_size)
if not cont:
break
hasher.update(cont)
crc = crc32(cont, crc)
size += len(cont)
finally:
with self.mutex:
self._should_cancel = None
return hasher.content_hash(), crc, size, initial_stat
def busy(self):
"""Return whether we are busy."""
with self.mutex:
return self.hashing
def cancel_if_running(self, path):
"""Request a cancel/stop of the current hash, if it's == path."""
with self.mutex:
if self.hashing == path:
self._should_cancel = path
class HashQueue(object):
"""Interface between the real Hasher and the rest of the world."""
def __init__(self, event_queue):
self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ')
self._stopped = False
self._queue = UniqueQueue()
self._end_mark = object()
self.hasher = _Hasher(self._queue, self._end_mark, event_queue)
self.hasher.setDaemon(True)
self.hasher.start()
self.logger.info("HashQueue: _hasher started")
def _timestamp(self):
"""A timestamp with a small delay into the future."""
return time.time() + HASHQUEUE_DELAY
def insert(self, path, mdid):
"""Insert the path of a file to be hashed."""
if self._stopped:
self.logger.warning("HashQueue: already stopped when received "
"path %r mdid %s", path, mdid)
return
self.logger.debug("HashQueue: inserting path %r mdid %s", path, mdid)
self.hasher.cancel_if_running(path)
item = ((path, mdid), self._timestamp())
self._queue.put(item)
def shutdown(self):
"""Shutdown all resources and clear the queue"""
# clear the queue
self._queue.clear()
# stop the hasher
self.hasher.stop()
self._stopped = True
self.logger.info("HashQueue: _hasher stopped")
def empty(self):
"""Return whether we are empty or not"""
return self._queue.empty() and not self.hasher.busy()
def __len__(self):
"""Return the length of the queue (not reliable!)"""
return self._queue.qsize()
def is_hashing(self, path, mdid):
"""Return if the path is being hashed or in the queue."
|
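The _Hasher class above stops an in-flight hash by setting a mutex-guarded flag that the read loop checks between chunks. A stripped-down sketch of that cancellation pattern, using only the standard threading module and hypothetical names (it is not part of the ubuntuone code):

import threading

class CancellableWorker(threading.Thread):
    """Minimal illustration of the flag-under-a-lock pattern used by _Hasher above."""

    def __init__(self, chunks):
        super().__init__()
        self.chunks = chunks
        self.mutex = threading.Lock()
        self._should_cancel = False
        self.processed = 0

    def cancel(self):
        # Called from another thread; the work loop notices the flag between chunks.
        with self.mutex:
            self._should_cancel = True

    def run(self):
        for _ in self.chunks:
            with self.mutex:
                if self._should_cancel:
                    return
            self.processed += 1

worker = CancellableWorker(range(100000))
worker.start()
worker.cancel()          # may interrupt the loop partway through
worker.join()
print(worker.processed)  # some value <= 100000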
| gpfreitas/bokeh | bokeh/embed.py | Python | bsd-3-clause | 20,330 | 0.005804 |
''' Provide functions to embed Bokeh models (e.g., plots, widgets, layouts)
in various different ways.
There are a number of different combinations of options when embedding
Bokeh plots. The data for the plot can be contained in the document,
or on a Bokeh server, or in a sidecar JavaScript file. Likewise, BokehJS
may be inlined in the document, or loaded from CDN or a Bokeh server.
The functions in ``bokeh.embed`` provide functionality to embed in all
these different cases.
'''
from __future__ import absolute_import
import re
import uuid
from warnings import warn
from .templates import (
AUTOLOAD_JS, AUTOLOAD_TAG, FILE,
NOTEBOOK_DIV, PLOT_DIV, DOC_JS, SCRIPT_TAG
)
from .util.string import encode_utf8
from .plot_object import PlotObject, _ModelInDocument
from ._json_encoder import serialize_json
from .resources import DEFAULT_SERVER_HTTP_URL
from .client import DEFAULT_SESSION_ID
from .document import Document
from collections import Sequence
from six import string_types
def _wrap_in_function(code):
|
    # indent and wrap Bokeh function def around
    code = "\n".join(["    " + line for line in code.split("\n")])
|
return 'Bokeh.$(function() {\n%s\n});' % code
def components(plot_objects, resources=None, wrap_script=True, wrap_plot_info=True):
'''
Return HTML components to embed a Bokeh plot. The data for the plot is
stored directly in the returned HTML.
An example can be found in examples/embed/embed_multiple.py
.. note::
The returned components assume that BokehJS resources are
**already loaded**.
Args:
plot_objects (PlotObject|list|dict|tuple) :
A single PlotObject, a list/tuple of PlotObjects, or a dictionary of keys and PlotObjects.
resources :
Deprecated argument
wrap_script (boolean, optional) :
If True, the returned javascript is wrapped in a script tag.
(default: True)
wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.
Otherwise, return dicts that can be used to build your own divs.
(default: True)
If False, the returned dictionary contains the following information:
.. code-block:: python
{
'modelid': 'The model ID, used with Document.get_model_by_id',
'elementid': 'The css identifier the BokehJS will look for to target the plot',
'docid': 'Used by Bokeh to find the doc embedded in the returned script',
}
Returns:
UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*
Examples:
With default wrapping parameter values:
.. code-block:: python
components(plot)
# => (script, plot_div)
components((plot1, plot2))
# => (script, (plot1_div, plot2_div))
components({"Plot 1": plot1, "Plot 2": plot2})
# => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div})
Examples:
With wrapping parameters set to ``False``:
.. code-block:: python
components(plot, wrap_script=False, wrap_plot_info=False)
# => (javascript, plot_dict)
components((plot1, plot2), wrap_script=False, wrap_plot_info=False)
# => (javascript, (plot1_dict, plot2_dict))
components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False)
# => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict})
'''
if resources is not None:
warn('Because the ``resources`` argument is no longer needed, '
'it is deprecated and no longer has any effect',
DeprecationWarning, stacklevel=2)
# 1) Convert single items and dicts into list
was_single_object = isinstance(plot_objects, PlotObject) or isinstance(plot_objects, Document)
# converts single to list
plot_objects = _check_plot_objects(plot_objects, allow_dict=True)
# now convert dict to list, saving keys in the same order
plot_object_keys = None
if isinstance(plot_objects, dict):
plot_object_keys = plot_objects.keys()
values = []
# don't just use .values() to ensure we are in the same order as key list
for k in plot_object_keys:
values.append(plot_objects[k])
plot_objects = values
# 2) Do our rendering
with _ModelInDocument(plot_objects):
(docs_json, render_items) = _standalone_docs_json_and_render_items(plot_objects)
custom_models = _extract_custom_models(plot_objects)
script = _script_for_render_items(docs_json, render_items, custom_models=custom_models,
websocket_url=None, wrap_script=wrap_script)
script = encode_utf8(script)
if wrap_plot_info:
results = list(_div_for_render_item(item) for item in render_items)
else:
results = render_items
# 3) convert back to the input shape
if was_single_object:
return script, results[0]
elif plot_object_keys is not None:
result = {}
for (key, value) in zip(plot_object_keys, results):
result[key] = value
return script, result
else:
return script, tuple(results)
def _escape_code(code):
""" Escape JS/CS source code, so that it can be embbeded in a JS string.
This is based on https://github.com/joliss/js-string-escape.
"""
def escape(match):
ch = match.group(0)
if ch == '"' or ch == "'" or ch == '\\':
return '\\' + ch
elif ch == '\n':
return '\\n'
elif ch == '\r':
return '\\r'
elif ch == '\u2028':
return '\\u2028'
elif ch == '\u2029':
return '\\u2029'
return re.sub(u"""['"\\\n\r\u2028\u2029]""", escape, code)
def _extract_custom_models(plot_objects):
custom_models = {}
def extract_from_model(model):
for r in model.references():
impl = getattr(r.__class__, "__implementation__", None)
if impl is not None:
name = r.__class__.__name__
impl = "['%s', {}]" % _escape_code(impl)
custom_models[name] = impl
for o in plot_objects:
if isinstance(o, Document):
for r in o.roots:
extract_from_model(r)
else:
extract_from_model(o)
return custom_models
def notebook_div(plot_object):
''' Return HTML for a div that will display a Bokeh plot in an
IPython Notebook
The data for the plot is stored directly in the returned HTML.
Args:
plot_object (PlotObject) : Bokeh object to render
Returns:
UTF-8 encoded HTML text for a ``<div>``
.. note::
Assumes :func:`~bokeh.util.notebook.load_notebook` or the equivalent
has already been executed.
'''
plot_object = _check_one_plot_object(plot_object)
with _ModelInDocument(plot_object):
(docs_json, render_items) = _standalone_docs_json_and_render_items([plot_object])
custom_models = _extract_custom_models([plot_object])
script = _script_for_render_items(docs_json, render_items,
custom_models=custom_models,
websocket_url=None)
item = render_items[0]
div = _div_for_render_item(item)
html = NOTEBOOK_DIV.render(
plot_script = script,
plot_div = div,
)
return encode_utf8(html)
def _use_widgets(plot_objects):
from .models.widgets import Widget
for o in plot_objects:
if isinstance(o, Document):
if _use_widgets(o.roots):
return True
else:
if any(isinstance(model, Widget) for model in o.references()):
return True
return False
def file_html(plot_objects,
resources,
title,
js_resources=None,
css_resources=None,
template=FILE,
template_variables={}):
'''Return an HTML document that embeds Bokeh PlotObject or Do
|
| zetaops/ulakbus | selenium_tests/test_ogrenci_iletisim_bilgileri.py | Python | gpl-3.0 | 1,614 | 0.006196 |
# -*- coding: utf-8 -*-
from test_settings import Settings
class TestCase(Settings):
def test_sidebar(self):
        # Applies the settings.
self.do_settings()
        # Clicks on "Genel" (General).
self.driver.find_element_by_css_selector(
'li.ng-binding:nth-child(3) > a:nth-child(1) > span:nth-child(2)').click()
        # Clicks on "Ogrenci Iletisim Bilgileri" (student contact information).
self.driver.find_element_by_css_selector('ul.in:nth-child(2) > li:nth-child(2) > a:nth-child(1)').click()
self.do_login()
        # Clicks on "Genel" (General).
self.driver.find_element_by_css_selector(
'li.ng-binding:nth-child(3) > a:nth-child(1) > span:nth-child(2)').click()
        # Clicks on "Ogrenci Iletisim Bilgileri" (student contact information).
self.driver.find_element_by_css_selector('ul.in:nth-child(2) > li:nth-child(2) > a:nth-child(1)').click()
        # Sends a value to the "Ikamet Il" (residence province) field.
        self.driver.find_element_by_css_selector('#ikamet_il').send_keys('Bilecik')
|
        # Sends a value to the "Ikamet Ilce" (residence district) field.
self.driver.find_element_by_css_selector('#ikamet_ilce').send_keys('Merkez')
        # Sends a value to the "Ikametgah Adresi" (residence address) field.
        self.driver.find_element_by_css_selector('#ikamet_adresi').send_keys('balim sokak')
|
        # Sends a value to the "Posta Kodu" (postal code) field.
self.driver.find_element_by_css_selector('#posta_kodu').send_keys('11000')
        # Sends a value to the "Telefon Numarasi" (phone number) field.
self.driver.find_element_by_css_selector('#tel_no').send_keys('0534626286816')
        # Clicks "Kaydet" (save).
self.driver.find_element_by_css_selector('button.btn-danger:nth-child(1)').click()
|
| reyrodrigues/EU-SMS | temba/values/tests.py | Python | agpl-3.0 | 20,151 | 0.00263 |
from __future__ import unicode_literals
import json
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone
from mock import patch
from temba.contacts.models import ContactField
from temba.flows.models import RuleSet
from temba.orgs.models import Language
from temba.tests import FlowFileTest
from .models import Value
class ResultTest(FlowFileTest):
def assertResult(self, result, index, category, count):
self.assertEquals(count, result['categories'][index]['count'])
self.assertEquals(category, result['categories'][index]['label'])
def test_field_results(self):
(c1, c2, c3, c4) = (self.create_contact("Contact1", '0788111111'),
self.create_contact("Contact2", '0788222222'),
self.create_contact("Contact3", '0788333333'),
self.create_contact("Contact4", '0788444444'))
# create a gender field that uses strings
gender = ContactField.get_or_create(self.org, self.admin, 'gender', label="Gender", value_type=Value.TYPE_TEXT)
c1.set_field(self.user, 'gender', "Male")
c2.set_field(self.user, 'gender', "Female")
c3.set_field(self.user, 'gender', "Female")
result = Value.get_value_summary(contact_field=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset']) # this is two as we have the default contact created by our unit tests
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Female", 2)
self.assertResult(result, 1, "Male", 1)
# create an born field that uses decimals
born = ContactField.get_or_create(self.org, self.admin, 'born', label="Born", value_type=Value.TYPE_DECIMAL)
c1.set_field(self.user, 'born', 1977)
c2.set_field(self.user, 'born', 1990)
c3.set_field(self.user, 'born', 1977)
result = Value.get_value_summary(contact_field=born)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset'])
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "1977", 2)
self.assertResult(result, 1, "1990", 1)
# ok, state field!
state = ContactField.get_or_create(self.org, self.admin, 'state', label="State", value_type=Value.TYPE_STATE)
c1.set_field(self.user, 'state', "Kigali City")
c2.set_field(self.user, 'state', "Kigali City")
result = Value.get_value_summary(contact_field=state)[0]
self.assertEquals(1, len(result['categories']))
self.assertEquals(2, result['set'])
self.assertEquals(3, result['unset'])
self.assertResult(result, 0, "1708283", 2)
reg_date = ContactField.get_or_create(self.org, self.admin, 'reg_date', label="Registration Date", value_type=Value.TYPE_DATETIME)
now = timezone.now()
c1.set_field(self.user, 'reg_date', now.replace(hour=9))
c2.set_field(self.user, 'reg_date', now.replace(hour=4))
c3.set_field(self.user, 'reg_date', now - timedelta(days=1))
result = Value.get_value_summary(contact_field=reg_date)[0]
self.assertEquals(2, len(result['categories']))
self.assertEquals(3, result['set'])
        self.assertEquals(2, result['unset'])
|
self.assertResult(result, 0, now.replace(hour=0, minute=0, second=0, microsecond=0), 2)
self.assertResult(result, 1, (now - timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0), 1)
# make sure categories returned are sorted by count, not name
c2.set_field(self.user, 'gender', "Male")
result = Value.get_value_summary(contact_field=gender)[0]
        self.assertEquals(2, len(result['categories']))
|
self.assertEquals(3, result['set'])
self.assertEquals(2, result['unset']) # this is two as we have the default contact created by our unit tests
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Male", 2)
self.assertResult(result, 1, "Female", 1)
# check the modified date is tracked for fields
original_value = Value.objects.get(contact=c1, contact_field=gender)
c1.set_field(self.user, 'gender', 'unknown')
new_value = Value.objects.get(contact=c1, contact_field=gender)
self.assertTrue(new_value.modified_on > original_value.modified_on)
self.assertNotEqual(new_value.string_value, original_value.string_value)
def run_color_gender_flow(self, contact, color, gender, age):
self.assertEqual(self.send_message(self.flow, color, contact=contact, restart_participants=True), "What is your gender?")
self.assertEqual(self.send_message(self.flow, gender, contact=contact), "What is your age?")
self.assertEqual(self.send_message(self.flow, age, contact=contact), "Thanks.")
def setup_color_gender_flow(self):
self.flow = self.get_flow('color_gender_age')
(self.c1, self.c2, self.c3, self.c4) = (self.create_contact("Contact1", '0788111111'),
self.create_contact("Contact2", '0788222222'),
self.create_contact("Contact3", '0788333333'),
self.create_contact("Contact4", '0788444444'))
def test_category_results(self):
self.setup_color_gender_flow()
# create a state field:
# assign c1 and c2 to Kigali
state = ContactField.get_or_create(self.org, self.admin, 'state', label="State", value_type=Value.TYPE_STATE)
district = ContactField.get_or_create(self.org, self.admin, 'district', label="District", value_type=Value.TYPE_DISTRICT)
self.c1.set_field(self.user, 'state', "Kigali City")
self.c1.set_field(self.user, 'district', "Kigali")
self.c2.set_field(self.user, 'state', "Kigali City")
self.c2.set_field(self.user, 'district', "Kigali")
self.run_color_gender_flow(self.c1, "red", "male", "16")
self.run_color_gender_flow(self.c2, "blue", "female", "19")
self.run_color_gender_flow(self.c3, "green", "male", "75")
self.run_color_gender_flow(self.c4, "maroon", "female", "50")
# create a group of the women
ladies = self.create_group("Ladies", [self.c2, self.c4])
# get our rulesets
color = RuleSet.objects.get(flow=self.flow, label="Color")
gender = RuleSet.objects.get(flow=self.flow, label="Gender")
age = RuleSet.objects.get(flow=self.flow, label="Age")
# categories should be in the same order as our rules, should have correct counts
result = Value.get_value_summary(ruleset=color)[0]
self.assertEquals(3, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Red", 2)
self.assertResult(result, 1, "Blue", 1)
self.assertResult(result, 2, "Green", 1)
# check our age category as well
result = Value.get_value_summary(ruleset=age)[0]
self.assertEquals(3, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Child", 1)
self.assertResult(result, 1, "Adult", 2)
self.assertResult(result, 2, "Senior", 1)
# and our gender categories
result = Value.get_value_summary(ruleset=gender)[0]
self.assertEquals(2, len(result['categories']))
self.assertFalse(result['open_ended'])
self.assertResult(result, 0, "Male", 2)
self.assertResult(result, 1, "Female", 2)
# now filter the results and only get responses by men
result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"])])[0]
self.assertResult(result, 0, "Red", 1)
self.assertResult(result, 1, "Blue", 0)
self.assertResult(result, 2, "Green", 1)
# what about men that are adults?
result = Value.g
|
| softinus/Movie_DataMiner | boxofficemojo.com/BoxOfficeMojo_Scraping_Code.py | Python | apache-2.0 | 13,739 | 0.011427 |
# written in python 3.6.1
#-*- coding: utf-8 -*-
from urllib.request import urlopen
import json
import string
import re
from bs4 import BeautifulSoup
import logging
import time
FILE_PATH = "./boxofficemojo.com/movie_data.txt"
LOG_PATH = "./boxofficemojo.com/scraping.log"
logging.basicConfig(filename=LOG_PATH,level=logging.DEBUG)
Keys = ["Name", "URL", "Genre","Runtime", "Rating", "MovieRanking"
, "PercentageofTotalGross", "WidestRelease", "CloseDate", "InRelease", "TotalGross"
, "Distributor", "Budget", "Domestic_Gross", "Domestic_Percentage"
, "Foreign_Gross", "Foreign_Percentage", "Worldwide_Gross", "OpeningWeekend"
, "Countryclicktoviewweekendbreakdown", "Dist", "ReleaseDate"
, "OpeningWknd", "ofTotal", "TotalGross", "AsOf"]
def add_empty_data(arrData, count):
for i in range(0,count):
arrData.append(" ")
return arrData
def remove_special_chars(dictData):
newDict= {}
for key in dictData:
new_key= re.sub(r'\W+', '', key)
newDict[new_key] = dictData[key]
return newDict
def save_to_json(filePath, dictData, countriesData=None):
dictData = remove_special_chars(dictData)
countriesData = remove_special_chars(countriesData)
if countriesData:
merged = dict(dictData)
merged.update(countriesData)
dictData = merged
with open(filePath, "a") as outfile:
json.dump(dictData, outfile, ensure_ascii=False)
def write_header(filePath):
# Write a header
text_file = open(filePath, "ab")
for header in Keys:
text_file.write((header + u"|").encode('utf-8'))
text_file.write("\n".encode('utf-8'))
text_file.close()
def save_to_file(filePath, dictData, countriesData=None):
dictData = remove_special_chars(dictData)
if countriesData:
countriesData = remove_special_chars(countriesData)
if countriesData:
merged = dict(dictData)
merged.update(countriesData)
dictData = merged
Arranged= []
add_empty_data(Arranged, 50)
text_file = open(filePath, "ab")
for key, value in dictData.items():
for i ,k in enumerate(Keys):
if key == k:
Arranged[i]= value
for data in Arranged:
text_file.write((data + u"|").encode('utf-8'))
text_file.write("\n".encode('utf-8'))
text_file.close()
def get_total_lifetime_grosses(link, arrData):
url = "http://www.boxofficemojo.com"+ link
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
# Assume that domestic info is from USA
arrData['Countryclicktoviewweekendbreakdown']= "USA"
#print(main_tbl)
tables = soup.find_all('table', attrs={'border': '0' , 'cellspacing':'0', 'cellpadding':'0' , 'width':'100%'})
#print( len(tables))
#td_count = 9
if len(tables) == 4:
#print(tables[3]) # Total lifetime grosses
mp_boxes= tables[3].find_all("div", {"class", "mp_box_tab"})
a= len(mp_boxes)
for box in mp_boxes:
if(box.text == "Total Lifetime Grosses"):
div_content= box.findNext('div')
trs = div_content.find_all('tr')
for tr in trs:
tds = tr.find_all('td')
if len(tds) == 3:
|
if tds[0].text.strip() == "Domestic:":
arrData["Total Gross"] = tds[1].text.strip()
|
arrData["% ofTotal"] = tds[2].text.strip()
arrData[tds[0].text.strip()+"_Gross"] = tds[1].text.strip()
arrData[tds[0].text.strip()+"_Percentage"] = tds[2].text.strip()
if(box.text == "Domestic Summary"):
div_content = box.findNext('div')
DS_tables = div_content.find_all('table', attrs = { 'border': '0' , 'cellspacing':'0', 'cellpadding':'0'})
for DS_table in DS_tables:
DS_trs = DS_table.find_all('tr')
for DS_tr in DS_trs:
DS_tr_title = DS_tr.td.text.strip()
if(DS_tr_title == "Opening\xa0Weekend:") or (DS_tr_title == "Opening Weekend:"):
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData["Opening Weekend"] = DS_tr_content.text.strip()
arrData["OpeningWknd"] = DS_tr_content.text.strip()
elif "(#" in DS_tr_title:
arrData['Movie Ranking'] = DS_tr_title
elif "%\xa0of\xa0Total\xa0Gross" in DS_tr_title or "% of Total Gross" in DS_tr_title:
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Percentage of Total Gross'] = DS_tr_content.text.strip()
elif DS_tr_title == "Widest\xa0Release:" or DS_tr_title == "Widest Release:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Widest Release'] = DS_tr_content.text.strip() # 14.
elif DS_tr_title == "Close\xa0Date:" or DS_tr_title == "Close Date:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['Close Date'] = DS_tr_content.text.strip() # 15.
elif DS_tr_title == "In\xa0Release:" or DS_tr_title == "In Release:":
DS_tr_content = DS_tr.td.findNext('td')
if DS_tr_content:
arrData['In Release'] = DS_tr_content.text.strip() # 15.
if(box.text == "The Players"):
#print(box.findNext('div'))
pass
return arrData
def get_movie_foreign(link, arrData):
try:
eachCountry = {}
ColumnHeaders= []
url = "http://www.boxofficemojo.com"+ link + "&page=intl"
page = urlopen(url)
soup = BeautifulSoup(page, "lxml")
contents = soup.find('table', attrs={'border': '3' , 'cellspacing':'0', 'cellpadding':'5', 'align':'center', 'style':'margin-top: 5px;'})
if len(contents) == 1:
#print(contents)
intl_table = contents.tr.td.table
if intl_table:
trs = intl_table.find_all("tr")
if len(trs) == 3:
#print ("no data")
temp= 0
else:
for row,tr in enumerate(trs):
if row == 0:
tds= tr.find_all("td") # get each header's text
for td in tds:
header= td.text.strip()
if "/" in header:
divided_header = header.split('/')
ColumnHeaders.append(divided_header[0])
ColumnHeaders.append(divided_header[1])
else:
ColumnHeaders.append(td.text.strip())
                        if(row < 3): # don't save unnecessary data
continue
tds= tr.find_all("td")
for column, td in enumerate(tds):
# 11. Country, 12.Dist, 13. Release Date, 14.OW, 15.% of Total, 16.Total gross, 17. as of
eachCountry[ColumnHeaders[column]] = td.text.strip()
save_to_file(FILE_PATH, arrData, eachCountry)
#save_to_json(FILE_PATH, arrData, eachCountry)
eachCountry.clear()
return arrData
except Exception as e:
logging.exception(e)
return arrData
def get_movie_detail(movies_list, link, arrData):
if link not in movies_list:
|
| makkus/pyclist | pyclist/model_helpers.py | Python | apache-2.0 | 8,972 | 0.000892 |
import booby
from booby import fields
from booby.inspection import get_fields, is_model
from booby.validators import Required
from pydoc import locate
from collections import OrderedDict
from collections import OrderedDict
from tabulate import tabulate
import readline
MODEL_MAP = {}
class tabCompleter(object):
"""
A tab completer that can either complete from
the filesystem or from a list.
Partially taken from:
http://stackoverflow.com/questions/5637124/tab-completion-in-pythons-raw-input
"""
def createListCompleter(self, ll):
"""
        This is a closure that creates a method that autocompletes from
|
        the given list.
        Since the autocomplete function can't be given a list to complete from
        a closure is used to create the listCompleter function with a list to complete
|
from.
"""
def listCompleter(text, state):
line = readline.get_line_buffer()
if not line:
return [c + " " for c in ll][state]
else:
return [c + " " for c in ll if c.startswith(line)][state]
self.listCompleter = listCompleter
def ensure_json_value(value):
if is_model(value):
return dict(value)
else:
return value
def ensure_json(value):
if isinstance(value, (list, tuple)):
return [ensure_json_value(w) for w in value]
else:
return ensure_json_value(value)
class EditModel(object):
def __init__(self, model_type, current_value, help_map):
self.model_type = model_type
self.current_value = current_value
self.new_value = {}
self.help_map = help_map
def get_fields(self):
required_details = OrderedDict()
non_required_details = OrderedDict()
for k, f in sorted(get_fields(self.model_type).iteritems()):
if is_required(f):
required_details[k] = f
else:
non_required_details[k] = f
details = OrderedDict()
for k, f in required_details.iteritems():
details[k] = f
for k, f in non_required_details.iteritems():
details[k] = f
return details
def edit_field(self, field_name):
new_field_value = self.ask_field(field_name)
# field = get_fields(self.current_value).get(field_name)
value = ensure_json(new_field_value)
self.new_value[field_name] = value
def ask_field(self, field_name):
field_type = self.model_type.__dict__.get(field_name, None)
if not field_type:
print "No field of that name."
new_value = ask_detail_for_field(
field_name, field_type, None, self.help_map)
if is_model(new_value):
new_value = new_value.to_json()
return new_value
def print_current(self):
fields = self.get_fields()
table = []
i = 1
for k, v in fields.iteritems():
value = getattr(self.current_value, k, None)
row = [k, convert_for_print(value)]
table.append(row)
i = i + 1
print tabulate(table)
def print_new(self):
print self.new_value
def convert_value_to_print(value):
f = getattr(value, 'to_json', None)
if callable(f):
value = value.to_json()
return value
def convert_for_print(value):
if isinstance(value, (list, tuple)):
if len(value) > 0:
value = (convert_value_to_print(w) for w in value)
value = "[" + ", ".join(value) + "]"
else:
value = ""
else:
value = convert_value_to_print(value)
return value
def get_type(model):
if type(model) == fields.Integer or model == fields.Integer:
return 'Integer'
elif type(model) == fields.String or model == fields.String:
return 'String'
else:
return model.__name__
def is_required(field):
return next((True for x in field.validators if isinstance(x, Required)), False)
def convert_to_proper_base_type(base_type, value):
'''
    Converts the string input into the appropriate value type.
'''
if get_type(base_type) == 'Integer':
return int(value)
elif get_type(base_type) == 'String':
return value
elif get_type(base_type) == 'Boolean':
return bool(value)
else:
return value
def edit_details_for_type(model_type, old_object, help_map={}):
'''
Asks for user input to change an existing model.
'''
m = EditModel(model_type, old_object, help_map)
print
print "Current values:"
print
m.print_current()
print
selection = "xxx"
print
print "Caution: the new value will replace the old value, not be added to it."
print
while selection:
selection = raw_input("field to edit ('enter' to finish): ")
if selection:
print
m.edit_field(selection)
print
return m.new_value
def ask_details_for_type(model_type, ask_only_required=True, help_map={}):
'''
Asks for user input to create an object of a specified type.
If the type is registered in a model/builder map, the function associated
with this type is used to create the object instead of the auto-generated
query.
'''
if MODEL_MAP.get(model_type, None):
func = MODEL_MAP[model_type]
return func()
required_details = OrderedDict()
non_required_details = OrderedDict()
values = {}
for k, f in sorted(get_fields(model_type).iteritems()):
if is_required(f):
required_details[k] = f
else:
non_required_details[k] = f
print
print "Enter values for fields below. Enter '?' or '? arg1 [arg2]' for help for each field."
print
print "Required fields:"
print "----------------"
print
for k, f in required_details.iteritems():
while True:
value = ask_detail_for_field(k, f, ask_only_required, help_map)
if value:
values[k] = value
break
else:
print
print "This is a required field, please enter value for {}.".format(k)
print
if not ask_only_required:
print
print "Optional fields, press 'Enter' to ignore a field."
print "-------------------------------------------------"
print
for k, f in non_required_details.iteritems():
value = ask_detail_for_field(k, f, ask_only_required, help_map)
if value:
values[k] = value
print
obj = model_type(**values)
return obj
def ask_collection_detail(name, detail_type, ask_only_required=True, help_map={}):
result = []
print "Enter details for '{}', multiple entries possible, press enter to continue to next field.".format(name)
print
while True:
cd = ask_detail_for_field(
name, detail_type, ask_only_required, help_map)
if not cd:
break
else:
result.append(cd)
return result
def parse_for_help(answer, help_func):
if answer.startswith('?'):
args = answer.split(' ')[1:]
if not help_func:
print 'Sorry, no help available for this field.'
else:
print
help_func(*args)
print
return True
else:
return False
def ask_simple_field(name, field_type, help_map={}):
type_name = get_type(field_type)
answer = raw_input(" - {} ({}): ".format(name, type_name))
if not answer:
return None
if parse_for_help(answer, help_map.get(name, None)):
return ask_simple_field(name, field_type, help_map)
try:
value = convert_to_proper_base_type(field_type, answer)
except Exception as e:
print "Can't convert input: ", e
return ask_simple_field(name, field_type, help_map)
return value
def ask_detail_for_field(name, detail_type, ask_only_required=True, help_map={}):
value = None
if MODEL_MAP.get(type(detail_type), None):
func =
|
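createListCompleter in the tabCompleter class above wires a fixed list of options into readline. A small standalone sketch of the same idea, kept separate from pyclist and using illustrative option names:

import readline

def make_list_completer(options):
    """Return a readline completer over a fixed list, mirroring createListCompleter above."""
    def completer(text, state):
        matches = [o + " " for o in options if o.startswith(text)]
        return matches[state] if state < len(matches) else None
    return completer

readline.set_completer(make_list_completer(["create", "list", "delete"]))
readline.parse_and_bind("tab: complete")
# Any input()/raw_input() call made after this point tab-completes from the list.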
| heatseeknyc/data-science | src/wunderground.py | Python | mit | 2,014 | 0.036743 |
import urllib2, json, time, sys
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", dest="fahrenheit", action="store", default=False, type="string", help="Convert to FAHRENHEIT")
parser.add_option("-e", dest="end", action="store", default=False, type="string", help="START date")
parser.add_option("-s", dest="start", action="store", default=False, type="string", help="END date")
parser.add_option("-t", dest="token", action="store", default=False, type="string", help="Weather Underground TOKEN")
(options, args) = parser.parse_args()
if options.token:
token = options.token
else:
parser.print_help()
sys.exit()
if options.start:
start = options.start
else:
parser.print_help()
sys.exit()
if options.end:
end = options.end
else:
parser.print_help()
sys.exit()
if options.fahrenheit:
fahrenheit = True
else:
fahrenheit = False
start = datetime.strptime(start,'%Y-%m-%d')
end = datetime.strptime(end,'%Y-%m-%d')
url = ""
if end < start:
print "Error: end date " + str(end) + " occurs before start date " + str(start)
sys.exit()
for dt in rrule(DAILY, dtstart=start, until=end):
total = 0.0
temp = 0.0
count = 0
wunderground_url ="http://api.wunderground.com/api/" + token + "/history_" + dt.strftime("%Y%m%d") +"/q/NY/New_York_City.json"
try:
url = urllib2.urlopen(wunderground_url)
        parsed_json = json.loads(url.read())
|
except:
print "Error reading URL " + wunderground_url
print "Is your token correct?"
url.close()
sys.exit()
try:
for mean in parsed_json['history']['observations']:
if fahrenheit:
total += float(mean['tempi'])
else:
total += float(mean['tempm'])
count += 1
temp = (total / count)
print dt.strftime("%Y-%m-%d") + "," + str(temp)
|
except:
print "Error retrieving temperature records for start date " + str(start) + " end date " + str(end)
url.close()
time.sleep(10)
|
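The row above averages Weather Underground observations for each day inside the request loop. That aggregation step can be pulled into a small, testable helper; the sketch below assumes the same JSON layout ('history' -> 'observations' with 'tempm'/'tempi' fields) and is written for Python 3, whereas the script itself is Python 2:

def mean_temperature(parsed_json, fahrenheit=False):
    """Average one day's observations; mirrors the total/count loop above."""
    key = "tempi" if fahrenheit else "tempm"
    temps = [float(obs[key]) for obs in parsed_json["history"]["observations"]]
    return sum(temps) / len(temps)

# Illustrative payload, not real API output:
sample = {"history": {"observations": [{"tempm": "3.0", "tempi": "37.4"},
                                       {"tempm": "5.0", "tempi": "41.0"}]}}
print(mean_temperature(sample))        # 4.0
print(mean_temperature(sample, True))  # 39.2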
| michaelcho/redberry | redberry/utils/logger.py | Python | apache-2.0 | 327 | 0.003058 |
import logging
def init_logger():
|
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
|
logger = logging.getLogger('redberry')
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setFormatter(formatter)
logger.addHandler(console)
|
| pexip/os-python-amqp | t/unit/test_abstract_channel.py | Python | lgpl-2.1 | 4,759 | 0 |
from __future__ import absolute_import, unicode_literals
import pytest
from case import Mock, patch
from vine import promise
from amqp.abstract_channel import AbstractChannel
from amqp.exceptions import AMQPNotImplementedError, RecoverableConnectionError
from amqp.serialization import dumps
class test_AbstractChannel:
class Channel(AbstractChannel):
def _setup_listeners(self):
pass
@pytest.fixture(autouse=True)
def setup_conn(self):
self.conn = Mock(name='connection')
self.conn.channels = {}
self.channel_id = 1
self.c = self.Channel(self.conn, self.channel_id)
self.method = Mock(name='method')
self.content = Mock(name='content')
self.content.content_encoding = 'utf-8'
        self.c._METHODS = {(50, 61): self.method}
|
def test_enter_exit(self):
self.c.close = Mock(name='close')
with self.c:
pass
|
self.c.close.assert_called_with()
def test_send_method(self):
self.c.send_method((50, 60), 'iB', (30, 0))
self.conn.frame_writer.assert_called_with(
1, self.channel_id, (50, 60), dumps('iB', (30, 0)), None,
)
def test_send_method__callback(self):
callback = Mock(name='callback')
p = promise(callback)
self.c.send_method((50, 60), 'iB', (30, 0), callback=p)
callback.assert_called_with()
def test_send_method__wait(self):
self.c.wait = Mock(name='wait')
self.c.send_method((50, 60), 'iB', (30, 0), wait=(50, 61))
self.c.wait.assert_called_with((50, 61), returns_tuple=False)
def test_send_method__no_connection(self):
self.c.connection = None
with pytest.raises(RecoverableConnectionError):
self.c.send_method((50, 60))
def test_send_method__connection_dropped(self):
self.c.connection.frame_writer.side_effect = StopIteration
with pytest.raises(RecoverableConnectionError):
self.c.send_method((50, 60))
def test_close(self):
with pytest.raises(NotImplementedError):
self.c.close()
def test_wait(self):
with patch('amqp.abstract_channel.ensure_promise') as ensure_promise:
p = ensure_promise.return_value
p.ready = False
def on_drain(*args, **kwargs):
p.ready = True
self.conn.drain_events.side_effect = on_drain
p.value = (1,), {'arg': 2}
self.c.wait((50, 61), timeout=1)
self.conn.drain_events.assert_called_with(timeout=1)
prev = self.c._pending[(50, 61)] = Mock(name='p2')
p.value = None
self.c.wait([(50, 61)])
assert self.c._pending[(50, 61)] is prev
def test_dispatch_method__content_encoding(self):
self.c.auto_decode = True
self.method.args = None
self.c.dispatch_method((50, 61), 'payload', self.content)
self.content.body.decode.side_effect = KeyError()
self.c.dispatch_method((50, 61), 'payload', self.content)
def test_dispatch_method__unknown_method(self):
with pytest.raises(AMQPNotImplementedError):
self.c.dispatch_method((100, 131), 'payload', self.content)
def test_dispatch_method__one_shot(self):
self.method.args = None
p = self.c._pending[(50, 61)] = Mock(name='oneshot')
self.c.dispatch_method((50, 61), 'payload', self.content)
p.assert_called_with((50, 61), self.content)
def test_dispatch_method__one_shot_no_content(self):
self.method.args = None
self.method.content = None
p = self.c._pending[(50, 61)] = Mock(name='oneshot')
self.c.dispatch_method((50, 61), 'payload', self.content)
p.assert_called_with((50, 61))
assert not self.c._pending
def test_dispatch_method__listeners(self):
with patch('amqp.abstract_channel.loads') as loads:
loads.return_value = [1, 2, 3], 'foo'
p = self.c._callbacks[(50, 61)] = Mock(name='p')
self.c.dispatch_method((50, 61), 'payload', self.content)
p.assert_called_with(1, 2, 3, self.content)
def test_dispatch_method__listeners_and_one_shot(self):
with patch('amqp.abstract_channel.loads') as loads:
loads.return_value = [1, 2, 3], 'foo'
p1 = self.c._callbacks[(50, 61)] = Mock(name='p')
p2 = self.c._pending[(50, 61)] = Mock(name='oneshot')
self.c.dispatch_method((50, 61), 'payload', self.content)
p1.assert_called_with(1, 2, 3, self.content)
p2.assert_called_with((50, 61), 1, 2, 3, self.content)
assert not self.c._pending
assert self.c._callbacks[(50, 61)]
|
PrefPy/opra
|
compsocsite/appauth/migrations/0017_userprofile_exp_data.py
|
Python
|
mit
| 453
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-24 07:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appauth', '0016_userprofile_numq'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='exp_data',
field=models.TextField(default='{}'),
),
]
|
duy/python-foafcert
|
foafcert/gen_cacert.py
|
Python
|
gpl-2.0
| 4,578
| 0.008301
|
#!/usr/bin/python
# vim: set expandtab tabstop=4 shiftwidth=4:
# -*- coding: utf-8 -*-
# gen_cacert <http://rhizomatik.net/>
# Python functions for generating an X509 CA certificate
#
# Copyright (C) 2010 duy at rhizomatik dot net
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""
gen_cacert
Python functions for generating an X509 CA certificate.
Usage: execute ./gen_cacert -h
@author: duy
@organization: rhizomatik labs
@copyright: author
@license: GNU GPL version 3 or any later version
(details at http://www.gnu.org)
@contact: duy at rhizomatik dot net
@dependencies: python (>= version 2.5)
@change log:
@TODO:
* Get error/warning when one of the main parameters contains a space; that parameter and the following ones get ignored
* Add paramter for certificate serial path
"""
__app__ = "gen_cacert"
__author__ = "duy"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2010 duy"
__date__ = "2010/03/01"
__license__ = " GNU GPL version 3 or any later version (details at http://www.gnu.org)"
__credits__ = ""
from xmpp_foaf_cert import *
import sys
import getopt
DEBUG = True
## ----------------------------------------------------------------------
## administrative functions
## ----------------------------------------------------------------------
def _usage():
print "Usage: %s options" % __app__
print """
Options:
-h, --help Print this usage message.
-d, --debug
-p, --certificate-path CA certificate path
-k, --certificate-key-path CA private key path
-n, --commonname certificate commonName
-c, --country certificate countryName
-o, --organization certificate organizationName
-u, --organizationalunit certificate organizationalUnitName
-e, --email certificate emailAddress
"""
def _version():
"""
Display a formatted version string for the module
"""
print """%(__app__)s %(__version__)s
%(__copyright__)s
released %(__date__)s
Thanks to:
%(__credits__)s""" % globals()
def main(argv):
"""
Create an x509 CA certificate and save it as PEM file
@param CN: certificate commonName
@param C: certificate countryName
@param O: certificate organizationName
@param OU: certificate organizationalUnitName
@param Email: certificate emailAddress
@type CN: string
@type C: string
@type O: string
@type OU: string
@type Email: string
@param cacert_path: CA certificate path
@param cakey_path: CA private key path
@type cacert_path: string
@type cakey_path: string
"""
short_opts = "hdp:k:n:c:o:u:e:"
long_opts = ["help","debug", "certificate-path=","certificate-key-path=","commonname=","country=","organization=","organizationalunit=","email="]
try:
opts, args = getopt.getopt(argv, short_opts, long_opts)
except getopt.GetoptError:
print "The CA certificate will be created with default values"
# _usage()
# sys.exit(0)
# Example default values
CN = "CA Certificate"
C = "CR"
O="Rhizomatik Labs"
OU="Mycelia project"
Email="ca@rhizomatik.net"
cacert_path='/tmp/xmpp_foaf_cacert.pem'
cakey_path='/tmp/xmpp_foaf_cakey.key'
for opt, arg in opts:
if opt in ("-h", "--help"):
_usage()
sys.exit(0)
elif opt in ("-p","--certificate-path"):
cacert_path = arg
elif opt in ("-k","--certificate-key-path"):
cakey_path = arg
elif opt in ("-n","--commmonname"):
CN = arg
elif opt in ("-c","--country"):
C = arg
elif opt in ("-o","--organization"):
O = arg
elif opt in ("-u","--organizationalunit"):
OU = arg
elif opt in ("-e","--email"):
Email = arg
if DEBUG:
print "CN: "+CN
print "C: "+C
print "O: "+O
print "OU: "+OU
print "Email: "+Email
mkcacert_save(cacert_path, cakey_path, CN, C, O, OU, Email)
if __name__ == "__main__":
main(sys.argv[1:])
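# Example invocation (editor's sketch; the paths and subject fields below are
# made up for illustration). With no options the hard-coded defaults above are used.
#   ./gen_cacert -p /tmp/my_cacert.pem -k /tmp/my_cakey.key \
#       -n "Example CA" -c US -o "Example Org" -u "PKI" -e ca@example.org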
|
bamueh/dark-matter
|
test/test_alignments.py
|
Python
|
mit
| 7,278
| 0
|
import six
from unittest import TestCase
from dark.reads import Read, Reads
from dark.score import HigherIsBetterScore
from dark.hsp import HSP, LSP
from dark.alignments import (
Alignment, bestAlignment, ReadAlignments, ReadsAlignmentsParams,
ReadsAlignments)
class TestAlignment(TestCase):
"""
Tests for the dark.alignment.Alignment class
"""
def testExpectedAttrs(self):
"""
An alignment must have the expected attributes.
"""
alignment = Alignment(45, 'title')
self.assertEqual('title', alignment.subjectTitle)
self.assertEqual(45, alignment.subjectLength)
def testNoHspsWhenCreated(self):
"""
An alignment must have no HSPs when it is created.
"""
alignment = Alignment(45, 'title')
self.assertEqual(0, len(alignment.hsps))
def testAddHsp(self):
"""
It must be possible to add an HSP to an alignment.
"""
alignment = Alignment(45, 'title')
alignment.addHsp(HSP(3))
self.assertEqual(HSP(3), alignment.hsps[0])
class TestReadAlignments(TestCase):
"""
Tests for the dark.alignment.ReadAlignments class
"""
def testRead(self):
"""
A read alignments instance must store its read.
"""
read = Read('id', 'ACGT')
readAlignments = ReadAlignments(read)
self.assertEqual(read, readAlignments.read)
def testNoAlignments(self):
"""
A read alignments instance must be able to have no alignments.
"""
read = Read('id', 'ACGT')
readAlignments = ReadAlignments(read)
self.assertEqual(0, len(readAlignments))
def testAlignments(self):
"""
A read alignments instance must store its alignments.
"""
read = Read('id', 'ACGT')
alignment1 = Alignment(45, 'title1')
alignment2 = Alignment(55, 'title2')
readAlignments = ReadAlignments(read, [alignment1, alignment2])
self.assertEqual([alignment1, alignment2], readAlignments)
class TestBestAlignmentHSP(TestCase):
"""
Test the L{dark.hits.bestAlignment} function when HSPs are used.
"""
def testOneAlignment(self):
"""
When one alignment is present that alignment must be returned by
bestAlignment.
"""
alignment = Alignment(44, 'Seq 1')
alignment.addHsp(HSP(10))
alignment.addHsp(HSP(9))
alignments = [alignment]
hit = ReadAlignments(Read('id1', 'aaa'), alignments)
best = bestAlignment(hit)
self.assertEqual('Seq 1', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
def testThreeAlignments(self):
"""
When three alignments are present, the one with the highest first HSP
must be returned by bestAlignment.
"""
alignment1 = Alignment(33, 'Seq 1')
alignment1.addHsp(HSP(10))
alignment1.addHsp(HSP(9))
alignment2 = Alignment(44, 'Seq 2')
alignment2.addHsp(HSP(30))
alignment2.addHsp(HSP(29))
alignment3 = Alignment(55, 'Seq 3')
alignment3.addHsp(HSP(20))
alignment3.addHsp(HSP(19))
alignments = [alignment1, alignment2, alignment3]
hit = ReadAlignments(Read('id1', 'aaa'), alignments)
best = bestAlignment(hit)
self.assertEqual('Seq 2', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
class TestBestAlignmentLSP(TestCase):
"""
Test the L{dark.hits.bestAlignment} function when LSPs are used.
"""
def testOneAlignment(self):
"""
When one alignment is present that alignment must be returned by
bestAlignment.
"""
alignment = Alignment(44, 'Seq 1')
alignment.addHsp(LSP(10))
alignment.addHsp(LSP(9))
alignments = [alignment]
readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
best = bestAlignment(readAlignments)
self.assertEqual('Seq 1', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
def testThreeAlignments(self):
"""
When three alignments are present, the one with the lowest first HSP
must be returned by bestAlignment.
"""
alignment1 = Alignment(33, 'Seq 1')
alignment1.addHsp(LSP(10))
alignment1.addHsp(LSP(9))
alignment2 = Alignment(44, 'Seq 2')
alignment2.addHsp(LSP(3))
alignment2.addHsp(LSP(2))
alignment3 = Alignment(55, 'Seq 3')
alignment3.addHsp(LSP(20))
alignment3.addHsp(LSP(19))
alignments = [alignment1, alignment2, alignment3]
readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
best = bestAlignment(readAlignments)
self.assertEqual('Seq 2', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
class TestReadsAlignmentsParams(TestCase):
"""
Test the L{dark.alignments.ReadsAlignmentsParams} class.
"""
def testExpectedAttrs(self):
"""
A ReadsAlignmentsParams instance must have the expected attributes.
"""
applicationParams = {}
params = ReadsAlignmentsParams('application name', applicationParams,
False, 'Bit score')
self.assertEqual('application name', params.application)
self.assertIs(applicationParams, params.applicationParams)
self.assertFalse(params.subjectIsNucleotides)
self.assertEqual('Bit score', params.scoreTitle)
class TestReadsAlignments(TestCase):
"""
Test the L{dark.alignments.ReadsAlignments} class.
"""
# NOTE: The ReadsAlignments class is a base class for concrete
# implementations, such as BlastReadsAlignments. So it can only be
# tested minimally by itself. For full tests see the
# TestBlastReadsAlignments and TestBlastReadsAlignmentsFiltering
# classes in test/blast/blast_alignments.py
def testExpectedAttrs(self):
"""
A ReadsAlignments instance must have the expected attributes.
"""
reads = Reads()
params = {
'application': 'app name'
}
readsAlignments = ReadsAlignments(reads, params)
self.assertIs(readsAlignments.reads, reads)
self.assertEqual('app name', readsAlignments.params['application'])
self.assertIs(params, readsAlignments.params)
self.assertIs(HigherIsBetterScore, readsAlignments.scoreClass)
def testNotIterable(self):
"""
Iterating an empty ReadsAlignments must result in the empty list.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
self.assertEqual([], list(readsAlignments))
def testGetSubjectSequence(self):
"""
A ReadsAlignments instance will not implement getSubjectSequence.
Subclasses are expected to implement it.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
error = 'getSubjectSequence must be implemented by a subclass'
six.assertRaisesRegex(self, NotImplementedError, error,
readsAlignments.getSubjectSequence, 'title')
|
MirichST/patchcap
|
src/daemon/platedetector.py
|
Python
|
gpl-2.0
| 11,192
| 0.028145
|
import cv2
import logging
import numpy as np
import os
import sys
from lib.warping import ImageBlobWarping
from lib.singleton import Singleton
from logging import FileHandler, StreamHandler
from multiprocessing import Pool
from ocr import Ocr
from timeit import default_timer as timer
from vlogging import VisualRecord
from lib.timing import timing
logger = logging.getLogger(__name__)
"""
Processes an image, tries to find a rectangle that looks like an Argentine license plate, and runs OCR on it
and do ocr
"""
@Singleton
class PlateDetector(object):
def __init__(self):
self.vlogger = None
self.pre= None
self.edged= None
self.warp= ImageBlobWarping()
self.bnight= False
self.ocr_engine = Ocr('spa', logger)
def set_logger(self, logger):
self.vlogger = logger
def bestocr(self, ocrlst):
ocr_overall_acc_lst= []
imax= -1
if len(ocrlst) == 0:
return imax
ocr_acc= 0
#~ print ocrlst
for ocritm in ocrlst:
#~ print ocritm
for det in ocritm:
if det is not None and det[1] != None:
try:
ocr_acc = ocr_acc + det[1]**2
except:
pass
if len(ocritm) > 0:
ocr_acc /= len(ocritm)
ocr_acc= ocr_acc**0.5
print "ocr_acc: %.3f %%"%ocr_acc
ocr_overall_acc_lst.append(round(ocr_acc,3))
imax= max(ocr_overall_acc_lst)
return ocr_overall_acc_lst.index(imax)
""" Return best text recognized """
def first(self, img):
bbox= None
code= None
cnt= None
blobs= self.findBlobs( img )
ocrlst= []
bboxlst= []
for orig_rot_blob in blobs:
bb= np.int0(cv2.boxPoints( orig_rot_blob ))
bbox= cv2.boundingRect( bb )
w= bbox[2]
h= bbox[3]
if (w > 2*h) and (w > 80) and (w < 200): # this should be relative to image dimensions
code = self.ocr( orig_rot_blob )
if code:
ocrlst.append( code )
bboxlst.append( bbox )
print code
if len(code) == 6:
break
# hardcoded -- width should not be higher than img.width / 8
if (w > 2*h) and (w > 80) and (w < 400): # second stage without max size constraints
code = self.ocr( orig_rot_blob )
if code:
ocrlst.append( code )
bboxlst.append( bbox )
if len(code) == 6:
break
if len( ocrlst ) > 0:
ocr_best_index= self.bestocr( ocrlst )
if ocr_best_index != -1:
code = ocrlst[ ocr_best_index ]
bbox = bboxlst[ ocr_best_index ]
else:
print "none"
return code, bbox
def findBlobs(self, img):
rects= []
cnts= self.findContours(img)
for c in cnts:
c= c.reshape(-1, 2)
if len(c) < 4:
continue
arcl= cv2.arcLength(c, True)
approx= cv2.approxPolyDP(c, 0.02 * arcl, True)
approx= approx.reshape(-1, 2)
rect= cv2.minAreaRect(approx)
w, h= rect[1]
if len(approx) >= 4:
if (h > 0) and (w > h):
ratio = float(w) / h
if 2.4 < ratio < 4.2:
rects.append(rect)
return rects
def ocr(self, rect):
ang= rect[2]
w,h= rect[1]
if ang < -45:
ang= ang + 90
w= h
h= rect[1][0]
box= cv2.boxPoints(rect)
box= np.int0(box)
box= self.warp.order_points(box)
letters= []
code= []
try:
roic= self.warp.transform(self.edged, box)
roi= self.warp.transform(self.pre, box)
roi_orig= self.warp.transform(self.original_image, box)
except:
pass
print "some error"
return code
(roich, roicw)= roic.shape[:2]
nh= 143
if roich > 200:
nw= (roicw * nh)/roich
roi= cv2.resize(roi,(nw, nh), interpolation= cv2.INTER_LINEAR)
roic= cv2.resize(roic,(nw, nh), interpolation= cv2.INTER_LINEAR)
#~ self.do_skeleton(roi)
image_rect= self.prepare_for_ocr(roi)
image_rect2= image_rect.copy()
if self.vlogger:
self.vlogger.debug(VisualRecord("candidate", [image_rect], fmt = "jpg"))
i, cnts, hie_letters= cv2.findContours(image_rect, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if self.vlogger:
self.vlogger.debug(VisualRecord("candidate after contours", [cv2.drawContours(roi_orig,cnts,-1,(0,255,0),1)], fmt = "jpg"))
h= roic.shape[0]
filtered_cnts= []
for i,b in enumerate(cnts):
hie_let= hie_letters[0][i]
# [next, previous, first_child, parent]
if hie_let[3] == -1: # if contour has no parent then continue with next
continue
c = b.reshape(-1,2)
if len(b) < 3: # ??
continue
r= cv2.boundingRect(c)
# pantentes.txt - the letters are 3.2 cm tall and the whole plate is 29.4 cm wide
if r[2] < (image_rect.shape[1] / 10):
continue
ratio= float(r[3]) / r[2]
if not 1.5 <= ratio <= 2.5:
continue
letters.append(r)
filtered_cnts.append(b)
if len(letters) >= 4:
for p in enumerate(sorted(letters, key= lambda b:b[0])):
code.append(self._do_ocr(image_rect2, p[1], p[0]))
if self.vlogger:
self.vlogger.debug(VisualRecord("LETTER DETECTION", [cv2.drawContours(image_rect2,filtered_cnts,-1,(0,255,0),1)], fmt = "jpg"))
return code
def _do_ocr(self, img, b, i):
x,y,w,h = b
l = cv2.copyMakeBorder(
img[y:y+h, x:x+w],
5, 5, 5, 5, cv2.BORDER_CONSTANT,
value=255)
if i > 2:
return self.ocr_engine.read_digit(l)
return self.ocr_engine.read_text(l)
def findContours(self, img):
imgcopy= img.copy()
if self.bnight:
i= self.prepare_night(img)
else:
i= self.prepare_day(img)
_,cnts, hie = cv2.findContours(i, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if self.vlogger:
if self.bnight:
self.vlogger.debug(VisualRecord("contours", [cv2.drawContours(imgcopy,cnts,-1, (80,255,80),2),i], fmt = "jpg"))
else:
self.vlogger.debug(VisualRecord("contours", [cv2.drawContours(imgcopy,cnts,-1, (255,120,120),2),i], fmt = "jpg"))
return cnts
####################################################################################################
|
def prepare_night(self, img):
tinit= timer()
self.original_image= img
gray= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gauss_gray= cv2.GaussianBlur(gray, (5, 5), 0)
max_gray= np.max(gray)
std_gray= np.std(gray)
saturated_night= np.uint8(( gray > ( max_gray - 2 * std_gray )) * 255) # argentina
self.pre= gauss_gray
self.edged= cv2.Canny(saturated_night, 10, 200, apertureSize= 5)
if self.vlogger:
self.vlogger.debug(VisualRecord("thresholding > (max - 2 * std)", [saturated_night], fmt = "jpg"))
print "e:%.3f"%(timer()-tinit)
return self.edged
####################################################################################################
def prepare_day(self, img):
self.original_image= img
gray= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gauss_gray= cv2.GaussianBlur(gray, (5, 5), 0)
self.pre= gauss_gray
self.edged= cv2.Canny(gauss_gray, 1000, 1700, apertureSize= 5)
if self.vlogger:
self.vlogger.debug(VisualRecord("day prepare", [self.pre, self.edged], fmt = "jpg"))
return self.edged
#################################################################################
|
interrogator/corpkit
|
corpkit/dictionaries/__init__.py
|
Python
|
mit
| 774
| 0.002584
|
__all__ = ["wo
|
rdlists", "roles", "bnc", "processes", "verbs",
"uktous", "tagtoclass", "queries", "mergetags"]
from corpkit.dictionaries.bnc import _get_bnc
from corpkit.dictionaries.process_types import processes
from corpkit.dictionaries.process_types import verbs
from corpkit.dictionaries.roles import roles
from corpkit.dictionaries.wordlists import wordlists
from corpkit.dictionaries.queries import queries
from corpkit.dictionaries.word_transforms import taglemma
from corpkit.dictionaries.word_transforms import mergetags
from corpkit.dictionaries.word_transforms import usa_convert
roles = roles
wordlists = wordlists
processes = processes
bnc = _get_bnc
queries = queries
tagtoclass = taglemma
uktous = usa_convert
mergetags = mergetags
verbs = verbs
|
bilbeyt/ituro
|
ituro/simulation/admin.py
|
Python
|
mit
| 972
| 0.007202
|
from django.contrib import admin
from simulation.models import SimulationStage, SimulationStageMatch, SimulationStageMatchResult
class SimulationStageAdmin(admin.ModelAdmin):
list_display = ["number", "
|
created_at"]
list_filter = ["created_at"]
class SimulationStageMatchAdmin(admin.ModelAdmin):
list_display = ["stage", "order", "raund",
"cat", "rat", "won", "created_at"]
list_filter = ["stage", "created_at"]
search_fields = ["cat", "rat"]
readonly_fields = ["won", "cat_password", "rat_password", "system_password"]
class SimulationStageMatchResultAdmin(admin.ModelAdmin):
list_display = ["match", "is_caught", "distance", "is_cancelled", "created_at"]
list_filter = ["created_at"]
search_fields = ["match"]
admin.site.register(SimulationStage, SimulationStageAdmin)
admin.site.register(SimulationStageMatch, SimulationStageMatchAdmin)
admin.site.register(SimulationStageMatchResult, SimulationStageMatchResultAdmin)
|
asoliveira/NumShip
|
scripts/entrada/padrao/plot-1cg.py
|
Python
|
gpl-3.0
| 489
| 0.02686
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import scipy as sp
#Name of the file containing the position data
arq = 'CurvaGiro/pos.dat'
#Axis limits
v = [-10,1000, 0, 1000]
#x-axis label
xl = r'y metros'
#y-axis label
yl = r'x metros'
x = sp.genfromtxt('CurvaGiro/pos.dat')
a = plt.plot(x[:,2], x[:,1], 'k-')
plt.grid(True, 'both', color = '0.8', linestyle = '--', linewidth = 1)
plt.axis(v)
plt.xlabel(xl)
plt.ylabel(yl)
plt.show(a)
|
elektito/finglish
|
finglish/f2p.py
|
Python
|
mit
| 6,952
| 0.002158
|
#!/usr/bin/env python3
import os
import re
import itertools
from functools import reduce
from .version import __version__
sep_regex = re.compile(r'[ \-_~!@#%$^&*\(\)\[\]\{\}/\:;"|,./?`]')
def get_portable_filename(filename):
path, _ = os.path.split(__file__)
filename = os.path.join(path, filename)
return filename
def load_conversion_file(filename):
filename = get_portable_filename(filename)
with open(filename, encoding='utf-8') as f:
l = list(f)
l = [i for i in l if i.strip()]
l = [i.strip().split() for i in l]
return {i[0]: i[1:] for i in l}
print('Loading converters...')
beginning = load_conversion_file('f2p-beginning.txt')
middle = load_conversion_file('f2p-middle.txt')
ending = load_conversion_file('f2p-ending.txt')
print('Loading persian word list...')
with open(get_portable_filename('persian-word-freq.txt'), encoding='utf-8') as f:
word_freq = list(f)
word_freq = [i.strip() for i in word_freq if i.strip()]
word_freq = [i.split() for i in word_freq if not i.startswith('#')]
word_freq = {i[0]: int(i[1]) for i in word_freq}
print('Loading dictionary...')
with open(get_portable_filename('f2p-dict.txt'), encoding='utf-8') as f:
dictionary = [i.strip().split(' ', 1) for i in f if i.strip()]
dictionary = {k.strip(): v.strip() for k, v in dictionary}
def f2p_word_internal(word, original_word):
# this function receives the word as separate letters
persian = []
for i, letter in enumerate(word):
if i == 0:
converter = beginning
elif i == len(word) - 1:
converter = ending
else:
converter = middle
conversions = converter.get(letter)
if conversions == None:
return [(''.join(original_word), 0.0)]
else:
conversions = ['' if i == 'nothing' else i for i in conversions]
persian.append(conversions)
alternatives = itertools.product(*persian)
alternatives = [''.join(i) for i in alternatives]
alternatives = [(i, word_freq[i]) if i in word_freq else (i, 0)
for i in alternatives]
if len(alternatives) > 0:
max_freq = max(freq for _, freq in alternatives)
alternatives = [(w, float(freq / max_freq)) if freq != 0 else (w, 0.0)
for w, freq in alternatives]
else:
alternatives = [(''.join(word), 1.0)]
return alternatives
def variations(word):
"""Create variations of the word based on letter combinations like oo,
sh, etc."""
if word == 'a':
return [['A']]
elif len(word) == 1:
return [[word[0]]]
elif word == 'aa':
return [['A']]
elif word == 'ee':
return [['i']]
elif word == 'ei':
return [['ei']]
elif word in ['oo', 'ou']:
return [['u']]
elif word == 'kha':
return [['kha'], ['kh', 'a']]
elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return [[word]]
elif word in ["'ee", "'ei"]:
return [["'i"]]
elif word in ["'oo", "'ou"]:
return [["'u"]]
elif word in ["a'", "e'", "o'",
|
"i'", "u'", "A'"]:
return [[word[0] + "'"]]
elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [["'" + word[1]]]
elif len(word) == 2 and word[0] == word[1]:
return [[word[0]]]
if word[:2] == 'aa':
return [['A'] + i for i in variations(word[2:])]
elif word[:2] == 'ee':
return [['i'] + i for i in variations(word[2:])]
elif word[:2] in ['oo', 'ou']:
return [['u'] + i for i in variations(word[2:])]
elif word[:3] == 'kha':
return \
[['kha'] + i for i in variations(word[3:])] + \
[['kh', 'a'] + i for i in variations(word[3:])] + \
[['k', 'h', 'a'] + i for i in variations(word[3:])]
elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:
return \
[[word[:2]] + i for i in variations(word[2:])] + \
[[word[0]] + i for i in variations(word[1:])]
elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif word[:3] in ["'ee", "'ei"]:
return [["'i"] + i for i in variations(word[3:])]
elif word[:3] in ["'oo", "'ou"]:
return [["'u"] + i for i in variations(word[3:])]
elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]:
return [[word[:2]] + i for i in variations(word[2:])]
elif len(word) >= 2 and word[0] == word[1]:
return [[word[0]] + i for i in variations(word[2:])]
else:
return [[word[0]] + i for i in variations(word[1:])]
def f2p_word(word, max_word_size=15, cutoff=3):
"""Convert a single word from Finglish to Persian.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
"""
original_word = word
word = word.lower()
c = dictionary.get(word)
if c:
return [(c, 1.0)]
if word == '':
return []
elif len(word) > max_word_size:
return [(original_word, 1.0)]
results = []
for w in variations(word):
results.extend(f2p_word_internal(w, original_word))
# sort results based on the confidence value
results.sort(key=lambda r: r[1], reverse=True)
# return the top three results in order to cut down on the number
# of possibilities.
return results[:cutoff]
def f2p_list(phrase, max_word_size=15, cutoff=3):
"""Convert a phrase from Finglish to Persian.
phrase: The phrase to convert.
max_word_size: Maximum size of the words to consider. Words larger
than this will be kept unchanged.
cutoff: The cut-off point. For each word, there could be many
possibilities. By default 3 of these possibilities are considered
for each word. This number can be changed by this argument.
Returns a list of lists, each sub-list contains a number of
possibilities for each word as a pair of (word, confidence)
values.
"""
# split the phrase into words
results = [w for w in sep_regex.split(phrase) if w]
# return an empty list if no words
if results == []:
return []
# convert each word separately
results = [f2p_word(w, max_word_size, cutoff) for w in results]
return results
def f2p(phrase, max_word_size=15, cutoff=3):
"""Convert a Finglish phrase to the most probable Persian phrase.
"""
results = f2p_list(phrase, max_word_size, cutoff)
return ' '.join(i[0][0] for i in results)
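def _demo_f2p():
    # Illustrative usage sketch (editor's addition): f2p() returns the single most
    # probable Persian phrase, while f2p_list() exposes the ranked (word, confidence)
    # candidates per word. The input phrase here is made up for demonstration.
    phrase = 'salam donya'
    print(f2p(phrase))
    for candidates in f2p_list(phrase):
        print(candidates[:2])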
def main():
print('Finglish to Persian Converter, v{}'.format(__version__))
print('finglish: ', end='')
phrase = input()
result = f2p(phrase)
print(result)
if __name__ == '__main__':
main()
|
mattvonrocketstein/smash
|
smashlib/ipy3x/kernel/zmq/ipkernel.py
|
Python
|
mit
| 13,151
| 0.000912
|
"""The IPython kernel implement
|
ation"""
import getpass
import sys
import traceback
from IPython.core import release
from IPython.html.widgets import Widget
from IPython.utils.py3compat import builtin_mod, PY3
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from IPython.utils.traitlets import Instance, Type, Any
from IPython.utils.decorators import undoc
from ..comm import CommManager
from .kernelbase import Kernel as KernelBase
from .serialize import serialize_object, unpack_apply_message
from .zmqshell import ZMQInteractiveShell
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(ZMQInteractiveShell)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir=self.profile_dir,
user_module=self.user_module,
user_ns=self.user_ns,
kernel=self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.shell.data_pub.session = self.session
self.shell.data_pub.pub_socket = self.iopub_socket
# TMP - hack while developing
self.shell._reply_content = None
self.comm_manager = CommManager(shell=self.shell, parent=self,
kernel=self)
self.comm_manager.register_target(
'ipython.widget', Widget.handle_comm_opened)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = ['comm_open', 'comm_msg', 'comm_close']
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(
self.comm_manager, msg_type)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {'name': 'ipython',
'version': sys.version_info[0]},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
# FIXME: the shell calls the exception handler itself.
shell._reply_content = None
try:
shell.run_cell(code, store_history=store_history, silent=silent)
except:
status = u'error'
# FIXME: this code right now isn't being used yet by default,
# because the run_cell() call above directly fires off exception
# reporting. This code, therefore, is only active in the scenario
# where runlines itself has an unhandled exception. We need to
# uniformize this, for all exception construction to come from a
# single location in the codebase.
etype, evalue, tb = sys.exc_info()
tb_list = traceback.format_exception(etype, evalue, tb)
reply_content.update(shell._showtraceback(etype, evalue, tb_list))
else:
status = u'ok'
finally:
self._restore_input()
reply_content[u'status'] = status
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
# FIXME - fish exception info out of shell, possibly left there by
# runlines. We'll need to clean up this logic later.
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(
engine_uuid=self.ident, engine_id=self.int_id, method='execute')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
if 'traceback' in reply_content:
self.log.info(
"Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and clear the payload system always.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
|
pombredanne/opc-diag
|
opcdiag/phys_pkg.py
|
Python
|
mit
| 5,859
| 0
|
# -*- coding: utf-8 -*-
#
# phys_pkg.py
#
# Copyright (C) 2013 Steve Canny scanny@cisco.com
#
# This module is part of opc-diag and is released under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
"""Interface to a physical OPC package, either a zip archive or directory"""
import os
import shutil
from zipfile import ZIP_DEFLATED, ZipFile
class BlobCollection(dict):
"""
Structures a set of blobs, like a set of files in an OPC package.
It can add and retrieve items by URI (relative path, roughly) and can
also retrieve items by uri_tail, the trailing portion of the URI.
"""
class PhysPkg(object):
"""
Provides read and write services for packages on the filesystem. Suitable
for use with OPC packages in either Zip or expanded directory form.
|PhysPkg| objects are iterable, generating a (uri, blob) 2-tuple for each
item in the package.
"""
def __init__(self, blobs, root_uri):
super(PhysPkg, self).__init__()
self._blobs = blobs
self._root_uri = root_uri
def __iter__(self):
"""
Generate a (uri, blob) 2-tuple for each of the items in the package.
"""
return iter(self._blobs.items())
@staticmethod
def read(path):
"""
Return a |PhysPkg| instance loaded with contents of OPC package at
*path*, where *path* can be either a regular zip package or a
directory containing an expanded package.
"""
if os.path.isdir(path):
return DirPhysPkg.read(path)
else:
return ZipPhysPkg.read(path)
@property
def root_uri(self):
return self._root_uri # pragma: no cover
@staticmethod
def write_to_dir(blobs, dirpath):
"""
Write the contents of the |BlobCollection| instance *blobs* to a
directory at *dirpath*. If a directory already exists at *dirpath*,
it is deleted before being recreated. If a file exists at *dirpath*,
|ValueError| is raised, to prevent unintentional overwriting.
"""
PhysPkg._clear_or_make_dir(dirpath)
for uri, blob in blobs.items():
PhysPkg._write_blob_to_dir(dirpath, uri, blob)
@staticmethod
def write_to_zip(blobs, pkg_zip_path):
"""
Write "files" in |BlobCollection| instance *blobs* to a zip archive
at *pkg_zip_path*.
"""
zipf = ZipFile(pkg_zip_path, 'w', ZIP_DEFLATED)
for uri in sorted(blobs.keys()):
blob = blobs[uri]
zipf.writestr(uri, blob)
zipf.close()
@staticmethod
def _clear_or_make_dir(dirpath):
"""
Create a new, empty directory at *dirpath*, removing and recreating
any directory found there. Raises |ValueError| if *dirpath* exists
but is not a directory.
"""
# raise if *dirpath* is a file
if os.path.exists(dirpath) and not os.path.isdir(dirpath):
tmpl = "target path '%s' is not a directory"
raise ValueError(tmpl % dirpath)
# remove any existing directory tree at *dirpath*
if os.path.exists(dirpath):
shutil.rmtree(dirpath)
# create dir at dirpath, as well as any intermediate-level dirs
os.makedirs(dirpath)
@staticmethod
def _write_blob_to_dir(dirpath, uri, blob):
"""
Write *blob* to a file under *dirpath*, where the segments of *uri*
that precede the filename are created, as required, as intermediate
directories.
"""
# In general, uri will contain forward slashes as segment separators.
# This next line converts them to backslashes on Windows.
item_relpath = os.path.normpath(uri)
fullpath = os.path.join(dirpath, item_relpath)
dirpath, filename = os.path.split(fullpath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with open(fullpath, 'wb') as f:
f.write(blob)
class DirPhysPkg(PhysPkg):
"""
An OPC physical package that has been expanded into individual files in
a directory structure that mirrors the pack URI.
"""
def __init__(self, blobs, root_uri):
super(DirPhysPkg, self).__init__(blobs, root_uri)
@classmethod
def read(cls, pkg_dir):
"""
Return a |BlobCollection| instance loaded from *pkg_dir*.
"""
blobs = BlobCollection()
pfx_len = len(pkg_dir)+1
for filepath in cls._filepaths_in_dir(pkg_dir):
uri = filepath[pfx_len:].replace('\\', '/')
with open(filepath, 'rb') as f:
blob = f.read()
blobs[uri] = blob
root_uri = pkg_dir
return cls(blobs, root_uri)
@staticmethod
def _filepaths_in_dir(dirpath):
"""
Return a sorted list of relative paths, one for each of the files
under *dirpath*, recursively visiting all subdirectories.
"""
filepaths = []
for root, dirnames, filenames in os.walk(dirpath):
for filename in filenames:
filepath = os.path.join(root, filename)
filepaths.append(filepath)
return sorted(filepaths)
class ZipPhysPkg(PhysPkg):
"""
An OPC physical package in the typically encountered form, a zip archive.
"""
def __init__(self, blobs, root_uri):
super(ZipPhysPkg, self).__init__(blobs, root_uri)
@classmethod
def read(cls, pkg_zip_path):
"""
Return a |BlobCollection| instance loaded from *pkg_zip_path*.
"""
blobs = BlobCollection()
zipf = ZipFile(pkg_zip_path, 'r')
for name in zipf.namelist():
blobs[name] = zipf.read(name)
zipf.close()
root_uri = os.path.splitext(pkg_zip_path)[0]
return cls(blobs, root_uri)
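def _demo_expand_package(pkg_path='example.docx', out_dir='/tmp/example_pkg'):
    # Illustrative round-trip sketch (editor's addition; the package path and
    # output directory are made up): read an OPC package from a zip archive or
    # an expanded directory, then write its blobs back out as a directory tree.
    pkg = PhysPkg.read(pkg_path)
    blobs = BlobCollection((uri, blob) for uri, blob in pkg)
    PhysPkg.write_to_dir(blobs, out_dir)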
|
geosolutions-it/ckanext-geonetwork
|
ckanext/geonetwork/harvesters/__init__.py
|
Python
|
gpl-3.0
| 309
| 0
|
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
from ckanext.geonetwork.harvesters.geonetwork import GeoNetworkHarvester
from ckanext.geonetwork.harvesters.utils import GeoNetworkClient
|
google/in-silico-labeling
|
isl/augment_test.py
|
Python
|
apache-2.0
| 2,507
| 0.002792
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
|
# pylint: disable=g-bad-import-order
from isl import augment
from isl import test_util
from isl import util
flags = tf.flags
test = tf.test
lt = tf.contrib.labeled_tensor
FLAGS = flags.FLAGS
class CorruptTest(test_util.Base):
def setUp(self):
super(CorruptTest, self).setUp()
self.signal_lt = lt.select(self.input_lt, {'mask': util.slice_1(False)})
rc = lt.ReshapeCoder(['z', 'channel', 'mask'], ['channel'])
self.corrupt_coded_lt = augment.corrupt(0.1, 0.05, 0.1,
rc.encode(self.signal_lt))
self.corrupt_lt = rc.decode(self.corrupt_coded_lt)
def test_name(self):
self.assertIn('corrupt', self.corrupt_coded_lt.name)
def test(self):
self.assertEqual(self.corrupt_lt.axes, self.signal_lt.axes)
self.save_images('corrupt', [self.get_images('', self.corrupt_lt)])
self.assert_images_near('corrupt', True)
class AugmentTest(test_util.Base):
def setUp(self):
super(AugmentTest, self).setUp()
ap = augment.AugmentParameters(0.1, 0.05, 0.1)
self.input_augment_lt, self.target_augment_lt = augment.augment(
ap, self.input_lt, self.target_lt)
def test_name(self):
self.assertIn('augment/input', self.input_augment_lt.name)
self.assertIn('augment/target', self.target_augment_lt.name)
def test(self):
self.assertEqual(self.input_augment_lt.axes, self.input_lt.axes)
self.assertEqual(self.target_augment_lt.axes, self.target_lt.axes)
self.save_images('augment', [
self.get_images('input_', self.input_augment_lt),
self.get_images('target_', self.target_augment_lt)
])
self.assert_images_near('augment', True)
if __name__ == '__main__':
test.main()
|
horejsek/python-webdriverwrapper
|
tests/test_info.py
|
Python
|
mit
| 735
| 0.004082
|
import pytest
from webdriverwrapper.exceptions import InfoMessagesException
def test_check_info_messages(driver_info_msgs):
with pytest.raises(InfoMessagesException) as excinfo:
driver_info_msgs.check_infos(expected_info_messages=('some-info',))
def test_check_expected_info_messages(driver_info_msgs):
driver_info_msgs.check_infos(expected_info_messages=('some-info', 'another-info'))
def test_check_allowed_info_messages(driver_info_msgs):
driver_info_msgs.check_infos(allowed_info_messages=('some-info', 'another-info'))
def test_check_expected_and_allowed_info_messages(driver_info_msgs):
driver_info_msgs.check_infos(expected_info_messages=('some-info',), allowed_info_messages=('another-info',))
|
felix-dumit/campusbot
|
yowsup2/yowsup/layers/protocol_iq/protocolentities/iq_ping.py
|
Python
|
mit
| 555
| 0.030631
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq import IqProtocolEntity
class PingIqProtocolEntity(IqProtocolEntity):
'''
Receive
<iq type="get" xmlns="urn:xmpp:ping" from="s.wh
|
atsapp.net" id="1416174955-ping">
</iq>
Send
<iq type="get" xmlns="w:p" to="s.whatsapp.net" id="1416174955-ping">
</iq>
'''
def __init__(self, _from = None, to = None, _id = None):
super(PingIqProtocolEntity, self).__init__("urn:xmpp:ping" if _from else "w:p", _id = _id, _type = "get", _from = _from, to = to)
|
xmikos/soapy_power
|
soapypower/psd.py
|
Python
|
mit
| 4,040
| 0.001733
|
#!/usr/bin/env python3
import math, logging, threading, concurrent.futures
import numpy
import simplespectral
from soapypower import threadpool
logger = logging.getLogger(__name__)
class PSD:
"""Compute averaged power spectral density using Welch's method"""
def __init__(self, bins, sample_rate, fft_window='hann', fft_overlap=0.5,
crop_factor=0, log_scale=True, remove_dc=False, detrend=None,
lnb_lo=0, max_threads=0, max_queue_size=0):
self._bins = bins
self._sample_rate = sample_rate
self._fft_window = fft_window
self._fft_overlap = fft_overlap
self._fft_overlap_bins = math.floor(self._bins * self._fft_overlap)
self._crop_factor = crop_factor
self._log_scale = log_scale
self._remove_dc = remove_dc
self._detrend = detrend
self._lnb_lo = lnb_lo
self._executor = threadpool.ThreadPoolExecutor(
max_workers=max_threads,
max_queue_size=max_queue_size,
thread_name_prefix='PSD_thread'
)
self._base_freq_array = numpy.fft.fftfreq(self._bins, 1 / self._sample_rate)
def set_center_freq(self, center_freq):
"""Set center frequency and clear averaged PSD data"""
psd_state = {
'repeats': 0,
'freq_array': self._base_freq_array + self._lnb_lo + center_freq,
'pwr_array': None,
'update_lock': threading.Lock(),
'futures': [],
}
return psd_state
def result(self, psd_state):
"""Return freqs and averaged PSD for given center frequency"""
freq_array = numpy.fft.fftshift(psd_state['freq_array'])
pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])
if self._crop_factor:
crop_bins_half = round((self._crop_factor * self._bins) / 2)
freq_array = freq_array[crop_bins_half:-crop_bins_half]
pwr_array = pwr_array[crop_bins_half:-crop_bins_half]
if psd_state['repeats'] > 1:
pwr_array = pwr_array / psd_state['repeats']
if self._log_scale:
pwr_array = 10 * numpy.log10(pwr_array)
return (freq_array, pwr_array)
def wait_for_result(self, psd_state):
"""Wait for all PSD threads to finish and return result"""
if len(psd_state['futures']) > 1:
concurrent.futures.wait(psd_state['futures'])
elif psd_state['futures']:
psd_state['futures'][0].result()
return self.result(psd_state)
def result_async(self, psd_state):
"""Return freqs and averaged PSD for given center frequency (asynchronously in another thread)"""
return self._executor.submit(self.wait_for_result, psd_state)
def _release_future_memory(self, future):
"""Remove result from future to release memory"""
future._result = None
def update(self, psd_state, samples_array):
"""Compute PSD from samples and update average for given center frequency"""
freq_array, pwr_array = simplespectral.welch(samples_array, self._sample_rate, nperseg=self._bins,
window=self._fft_window, noverlap=self._fft_overlap_bins,
detrend=self._detrend)
if self._remove_dc:
pwr_array[0] = (pwr_array[1] + pwr_array[-1]) / 2
with psd_state['update_lock']:
psd_state['repeats'] += 1
if psd_state['pwr_array'] is None:
psd_state['pwr_array'] = pwr_array
else:
psd_state['pwr_array'] += pwr_array
def update_async(self, psd_state, samples_array):
"""Compute PSD from samples and update average for given center frequency (asynchronously in another thread)"""
future = self._executor.submit(self.update, psd_state, samples_array)
future.add_done_callback(self._release_future_memory)
psd_state['futures'].append(future)
return future
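def _demo_psd():
    # Illustrative usage sketch (editor's addition): run one block of made-up
    # complex samples through the synchronous update path and read back the
    # averaged spectrum. No SDR hardware is involved; all values are arbitrary.
    psd = PSD(bins=256, sample_rate=2.048e6)
    state = psd.set_center_freq(100e6)
    samples = numpy.random.randn(16384) + 1j * numpy.random.randn(16384)
    psd.update(state, samples)
    freqs, powers = psd.result(state)
    return freqs, powers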
|
imperial-genomics-facility/data-management-python
|
igf_data/utils/project_data_display_utils.py
|
Python
|
apache-2.0
| 8,493
| 0.01672
|
import os
import pandas as pd
from igf_data.utils.seqrunutils import get_seqrun_date_from_igf_id
def _count_total_reads(data,seqrun_list):
'''
An internal function for counting total reads
required params:
:param data, A dictionary containing seqrun ids as keys and read counts as values
:param seqrun_list, A list of sequencing runs
'''
try:
data['run_count'] = 0
if 'total_read' not in data:
data['total_read']=0
if len(seqrun_list) >1:
for run in seqrun_list:
if data[run] > 0:
data['run_count'] += 1
data['total_read'] += data[run]
#if data['run_count'] == 1:
# data['total_read'] = 0
return data
except:
raise
def convert_project_data_gviz_data(input_data,
sample_col='sample_igf_id',
read_count_col='attribute_value',
seqrun_col='flowcell_id'):
'''
A utility method for converting project's data availability information to
gviz data table format
https://developers.google.com/chart/interactive/docs/reference#DataTable
required params:
:param input_data: A pandas data frame, it should contain following columns
sample_igf_id,
flowcell_id,
attribute_value (R1_READ_COUNT)
:param sample_col, Column name for sample id, default sample_igf_id
:param seqrun_col, Column name for sequencing run identifier, default flowcell_id
:param read_count_col, Column name for sample read counts, default attribute_value
return
a dictionary of description
a list of data dictionary
a tuple of column_order
'''
try:
if not isinstance(input_data, pd.DataFrame):
raise AttributeError('Expecting a pandas dataframe and got {0}'.\
format(type(input_data)))
input_data[read_count_col]=input_data[read_count_col].astype(float) # convert read counts to float
processed_data=input_data.\
pivot_table(values=read_count_col,
index=[sample_col,
seqrun_col],
aggfunc='sum') # group data by sample id and seq runs
processed_data.\
reset_index([sample_col,
seqrun_col],
inplace=True) # reset index for processed data
intermediate_data=list() # define empty intermediate data structure
seqrun_set=set() # define empty seqrun set
for line in processed_data.to_dict(orient='records'): # reformat processed data to required structure
tmp_data=dict()
tmp_data.update({sample_col:line[sample_col],
line[seqrun_col]:line[read_count_col]})
seqrun_set.add(line[seqrun_col])
intermediate_data.append(tmp_data)
intermediate_data=pd.DataFrame(intermediate_data) # convert intermediate data to dataframe
intermediate_data.fillna(0,inplace=True) # replace NAN values with zero
intermediate_data=intermediate_data.\
pivot_table(index=sample_col,
aggfunc='sum').\
reset_index(sample_col) # group data by samples id
intermediate_data=intermediate_data.\
apply(lambda line: \
_count_total_reads(data=line,
seqrun_list=list(seqrun_set)),
axis=1) # count total reads for multiple seq runs
multiple_run_data=intermediate_data[intermediate_data['run_count'] > 1] # check for multi run projects
if len(multiple_run_data.index)==0 and \
'total_read' in multiple_run_data.columns:
intermediate_data.drop('total_read',axis=1,inplace=True) # drop the total read column if all samples are single run
if 'run_count' in intermediate_data.columns:
intermediate_data.drop('run_count',axis=1,inplace=True) # removing run_count column
intermediate_data.fillna(0,inplace=True) # fail safe for missing samples
description = {sample_col: ("string", "Sample ID")} # define description
if len(list(seqrun_set)) >1 and \
'total_read' in intermediate_data.columns:
description.update({"total_read":("number", "Total Reads")}) # add total read column for samples with multiple runs
intermediate_data['total_read']=intermediate_data['total_read'].\
astype(float) # convert column to number
for run in list(seqrun_set):
description.update({run:("number",run)}) # add seqrun columns
intermediate_data[run]=intermediate_data[run].\
astype(float) # convert column to number
column_list=[sample_col] # define column order
column_list.extend(list(seqrun_set))
if len(list(seqrun_set)) > 1 and \
'total_read' in intermediate_data.columns:
column_list.append('total_read') # total read is present only for multiple runs
intermediate_data=intermediate_data.to_dict(orient='records') # convert data frame to json
column_order=tuple(column_list)
return description,intermediate_data,column_order
except:
raise
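def _demo_gviz_conversion():
    # Illustrative usage sketch (editor's addition; sample ids, flowcells and read
    # counts are made up): convert a small read-count table into gviz-style data.
    data = pd.DataFrame([
        {'sample_igf_id': 'IGF001', 'flowcell_id': 'HXXXX', 'attribute_value': 1000},
        {'sample_igf_id': 'IGF001', 'flowcell_id': 'HYYYY', 'attribute_value': 2000},
        {'sample_igf_id': 'IGF002', 'flowcell_id': 'HXXXX', 'attribute_value': 1500},
    ])
    description, records, column_order = convert_project_data_gviz_data(input_data=data)
    return description, records, column_order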
def _modify_seqrun_data(data_series,seqrun_col,flowcell_col,path_col):
'''
An internal method for parsing seqrun dataframe and adding remote dir path
required columns: seqrun_igf_id, flowcell_id
:param seqrun_col, Column name for sequencing run id, default seqrun_igf_id
:param flowcell_col, Column name for flowcell id, default flowcell_id
:param path_col, Column name for path, default path
returns a data series with following columns: flowcell_id, path
'''
try:
if not isinstance(data_series,pd.Series):
raise AttributeError('Expecting a pandas data series and got {0}'.\
format(type(data_series)))
seqrun_igf_id=data_series[seqrun_col]
flowcell_id=data_series[flowcell_col]
seqrun_date=get_seqrun_date_from_igf_id(seqrun_igf_id)
data_series[path_col]=os.path.join(seqrun_date,flowcell_id) # adding path to data series
del data_series[seqrun_col]
return data_series
except:
raise
def add_seqrun_path_info(input_data,output_file,seqrun_col='seqrun_igf_id',
flowcell_col='flowcell_id',path_col='path'):
'''
A utility method for adding the remote path to a dataframe for each sequencing run
of a project
required params:
:param input_data, An input dataframe containing the following columns
seqrun_igf_id
flowcell_id
:param seqrun_col, Column name for sequencing run id, default seqrun_igf_id
:param flowcell_col, Column name for flowcell id, default flowcell_id
:param path_col, Column name for path, default path
output_file: An output filepath for the json data
'''
try:
if not isinstance(input_data,pd.DataFrame):
raise AttributeError('Expecting a pandas dataframe and got {0}'.\
format(type(input_data)))
input_data.drop_duplicates(inplace=True) # remove duplicate entries
input_data=input_data.\
apply(lambda line: \
_modify_seqrun_data(data_series=line,
seqrun_col=seqrun_col,
flowcell_col=flowcell_col,
path_col=path_col),
axis=1)
|
ioam/topographica
|
topo/hardware/robotics.py
|
Python
|
bsd-3-clause
| 4,844
| 0.010735
|
"""
Classes for using robotic or other hardware using Topographica.
This module contains several classes for constructing robotics
interfaces to Topographica simulations. It includes modules that read
input from or send output to robot devices, and a (quasi) real-time
simulation object that attempts to maintain a correspondence between
simulation time and real time.
This module requires the PlayerStage robot interface system (from
playerstage.sourceforge.net), and the playerrobot module for
high-level communications with Player robots.
"""
import Image
import ImageOps
from math import pi,cos,sin
import param
from topo.base.simulation import EventProcessor
from imagen.image import GenericImage
from playerrobot import CameraDevice, PTZDevice
class CameraImage(GenericImage):
"""
An image pattern generator that gets its image from a Player
camera device.
"""
camera = param.ClassSelector(CameraDevice,default=None,doc="""
An instance of playerrobot.CameraDevice to be used
to generate images.""")
def __init__(self,**params):
super(CameraImage,self).__init__(**params)
self._image = None
def _get_image(self,params):
self._decode_image(*self.camera.image)
return True
def _decode_image(self,fmt,w,h,bpp,fdiv,data):
if fmt==1:
self._image = Image.new('L',(w,h))
self._image.fromstring(data,'raw')
else:
# JPALERT: if not grayscale, then assume color. This
# should be expanded for other modes.
rgb_im = Image.new('RGB',(w,h))
rgb_im.fromstring(data,'raw')
self._image = ImageOps.grayscale(rgb_im)
class CameraImageQueued(CameraImage):
"""
A version of CameraImage that gets the image from the camera's image queue,
rather than directly from the camera object. Using queues is
necessary when running the playerrobot in a separate process
without shared memory. When getting an image, this pattern
generator will fetch every image in the image queue and use the
most recent as the current pattern.
"""
def _get_image(self,params):
im_spec = None
if self._image is None:
# if we don't have an image then block until we get one
im_spec = self.camera.image_queue.get()
self.camera.image_queue.task_done()
# Make sure we clear the image queue and get the most recent image.
while not self.camera.image_queue.empty():
im_spec = self.camera.image_queue.get_nowait()
self.camera.image_queue.task_done()
if im_spec:
# If we got a new image from the queue, then
# construct a PIL image from it.
self._decode_image(*im_spec)
return True
else:
return False
class PTZ(EventProcessor):
"""
Pan/Tilt/Zoom control.
This event processor takes input events on its 'Saccade' input
port in the form of (amplitude,direction) saccade commands (as
produced by the topo.sheet.saccade.SaccadeController class) and
appropriately servoes the attached PTZ object. There is not
currently any dynamic zoom control, though the static zoom level
can be set as a parameter.
"""
ptz = param.ClassSelector(PTZDevice,default=None,doc="""
An instance of playerrobot.PTZDevice to be controlled.""")
zoom = param.Number(default=120,bounds=(0,None),doc="""
Desired FOV width in degrees.""")
speed = param.Number(default=200,bounds=(0,None),doc="""
Desired max pan/tilt speed in deg/sec.""")
invert_amplitude = param.Boolean(default=False,doc="""
Invert the sense of the amplitude signal, in order to get the
appropriate ipsi-/contralateral sense of saccades.""")
dest_ports = ["Saccade"]
src_ports = ["State"]
def start(self):
pass
def input_event(self,conn,data):
if conn.dest_port == "Saccade":
# the data should be (amplitude,direction)
amplitude,direction = data
self.shift(amplitude,direction)
def shift(self,amplitude,direction):
self.debug("Executing shift, amplitude=%.2f, direction=%.2f"%(amplitude,direction))
if self.invert_amplitude:
amplitude *= -1
# if the amplitude is negative, invert the direction, so up is still up.
if amplitude < 0:
direction *= -1
angle = direction * pi/180
pan,tilt,zoom = self.ptz.state_deg
pan += amplitude * cos(angle)
tilt += amplitude * sin(angle)
self.ptz.set_ws_deg(pan,tilt,self.zoom,self.speed,self.speed)
## self.ptz.cmd_queue.put_nowait(('set_ws_deg',
## (pan,tilt,self.zoom,self.speed,self.speed)))
|
mgmtech/sys76_unity_webmic
|
unity_avindicator/webmic.py
|
Python
|
gpl-3.0
| 751
| 0.011984
|
#!/usr/bin/env python
'''
A/V control for System76 laptop using Unity
'''
import os
from execute import returncode
# check for the existence of /dev/video0 which is used currently for webcam
webcam = lambda: os.path.exists('/dev/video0') == False
def webcam_toggle():
if webcam():
returncode('sudo /sbin/modprobe uvcvideo')
else:
returncode('sudo /sbin/modprobe -rv uvcvideo')
# use the amixer application to glean the status of the microphone
microphone = lambda: returncode("amixer get Capture | grep Capt | grep off") == 0
microphone_toggle = lambda: returncode("amixer set Capture toggle")
def main():
print "Mic muted ? {0}, Webcam off ? {1}".format(microphone(), webcam())
if __name__ == '__main__':
main()
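# --- Editor-added usage sketch; not part of the original file. ---
# The lambdas above treat a shell return code of 0 as a match, so
# microphone() is True when amixer reports the capture channel as off
# (muted) and webcam() is True when /dev/video0 is absent. A combined
# toggle-and-report helper built on them could look like:
def toggle_and_report():
    microphone_toggle()
    webcam_toggle()
    print "Mic muted ? {0}, Webcam off ? {1}".format(microphone(), webcam())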
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/utils/dateformat.py
|
Python
|
bsd-3-clause
| 8,796
| 0.002046
|
"""
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print df.format('jS F Y H:i')
7th October 2003 11:39
>>>
"""
import re
import time
import calendar
from django.utils.dates import MONTHS, MONTHS_3, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDfFgGhHiIjlLmMnNOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))):
if i % 2:
pieces.append(force_unicode(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return u''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, t):
self.data = t
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return u'%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e
|
. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return u'%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
        return u'%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return u'%02d' % self.data.minute
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return u'%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return u'%02d' % self.data.second
def u(self):
"Microseconds"
return self.data.microsecond
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def __init__(self, dt):
# Accepts either a datetime or date object.
self.data = dt
self.timezone = getattr(dt, 'tzinfo', None)
if hasattr(self.data, 'hour') and not self.timezone:
self.timezone = LocalTimezone(dt)
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return u'%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return u'1'
else:
return u'0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return u'%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def O(self):
"Difference to Greenwich time in hours; e.g. '+0200'"
seconds = self.Z()
return u"%+03d%02d" % (seconds // 3600, (seconds // 60) % 60)
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return u'th'
last = self.data.day % 10
if last == 1:
return u'st'
if last == 2:
return u'nd'
if last == 3:
return u'rd'
return u'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def T(self):
"Time zone of this machine; e.g. 'EST' or 'MDT'"
name = self.timezone and self.timezone.tzname(self.data) or None
if name is None:
name = self.format('O')
return unicode(name)
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if getattr(self.data, 'tzinfo', None):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return unicode(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always n
|
ctiller/grpc
|
tools/run_tests/artifacts/distribtest_targets.py
|
Python
|
apache-2.0
| 17,548
| 0.000399
|
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
                          shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
copy_rel_path=None,
timeout_seconds=30 * 60):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
# the entire repo will be cloned if copy_rel_path is not set.
if copy_rel_path:
environ['RELATIVE_COPY_PATH'] = copy_rel_path
docker_args = []
for k, v in list(environ.items()):
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name,
cmdline,
environ=None,
shell=False,
flake_retries=0,
timeout_retries=0,
use_workspace=False,
timeout_seconds=10 * 60):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
jobspec = jobset.JobSpec(cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self,
platform,
arch,
docker_suffix=None,
use_dotnet_cli=False,
presubmit=False):
self.name = 'csharp_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
if presubmit:
self.labels.append('presubmit')
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
del inner_jobs # arg unused as there is little opportunity for parallelizing whats inside the distribtests
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' %
self.script_suffix,
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(self.name, [
'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix
],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
use_workspace=True)
elif self.platform == 'windows':
if self.arch == 'x64':
# Use double leading / as the first occurrence gets removed by msys bash
# when invoking the .bat file (side-effect of posix path conversion)
environ = {
'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
}
else:
environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(self.name, [
'test\\distrib\\csharp\\run_distrib_test%s.bat' %
self.script_suffix
],
environ=environ,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self,
platform,
arch,
docker_suffix,
source=False,
presubmit=False):
self.source = source
if source:
self.name = 'python_dev_%s_%s_%s' % (platform, arch, docker_suffix)
else:
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TODO(jtattermusch): honor inner_jobs arg for this task.
del inner_jobs
if not self.platform == 'linux':
raise Exception("Not supported yet.")
if self.source:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_dev_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_source_distrib_test.sh',
copy_rel_path='test/distrib')
else:
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/python/run_binary_distrib_test.sh',
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self,
platform,
arch,
docker_suffix,
ruby_version=None,
source=False,
presubmit=False):
self.package_type = 'binary'
if source:
self.package_type = 'source'
self.name = 'ruby_%s_%s_%s_version_%s_package_type_%s' % (
platform, arch, docker_suffix, ruby_version or
'unspecified', self.package_type)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.ruby_version = ruby_version
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
if presubmit:
self.labels.append('presubmit')
def pre_build_jobspecs(self):
return []
def build_jobspec(self, inner_jobs=None):
# TOD
|
trevor/calendarserver
|
calendarserver/tap/util.py
|
Python
|
apache-2.0
| 38,773
| 0.00227
|
# -*- test-case-name: calendarserver.tap.test.test_caldav -*-
##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Utilities for assembling the service and resource hierarchy.
"""
__all__ = [
"getRootResource",
"getDBPool",
"FakeRequest",
"MemoryLimitService",
]
import errno
import os
from socket import fromfd, AF_UNIX, SOCK_STREAM, socketpair
import psutil
from twext.python.filepath import CachingFilePath as FilePath
from twext.python.log import Logger
from txweb2.auth.basic import BasicCredentialFactory
from txweb2.dav import auth
from txweb2.dav.util import joinURL
from txweb2.http_headers import Headers
from txweb2.resource import Resource
from txweb2.static import File as FileResource
from twisted.application.service import Service
from twisted.cred.portal import Portal
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, succeed
from twisted.internet import reactor as _reactor
from twisted.internet.reactor import addSystemEventTrigger
from twisted.internet.tcp import Connection
from calendarserver.push.applepush import APNSubscriptionResource
from calendarserver.push.notifier import NotifierFactory
from twext.enterprise.adbapi2 import ConnectionPool, ConnectionPoolConnection
from twext.enterprise.ienterprise import ORACLE_DIALECT
from twext.enterprise.ienterprise import POSTGRES_DIALECT
from twistedcaldav.bind import doBind
from twistedcaldav.cache import CacheStoreNotifierFactory
from twistedcaldav.directory.addressbook import DirectoryAddressBookHomeProvisioningResource
from twistedcaldav.directory.calendar import DirectoryCalendarHomeProvisioningResource
from twistedcaldav.directory.digest import QopDigestCredentialFactory
from twistedcaldav.directory.principal import DirectoryPrincipalProvisioningResource
from twistedcaldav.directorybackedaddressbook import DirectoryBackedAddressBookResource
from twistedcaldav.resource import AuthenticationWrapper
from twistedcaldav.simpleresource import SimpleResource, SimpleRedirectResource
from twistedcaldav.timezones import TimezoneCache
from twistedcaldav.timezoneservice import TimezoneServiceResource
from twistedcaldav.timezonestdservice import TimezoneStdServiceResource
from txdav.caldav.datastore.scheduling.ischedule.dkim import DKIMUtils, DomainKeyResource
from txdav.caldav.datastore.scheduling.ischedule.resource import IScheduleInboxResource
try:
from twistedcaldav.authkerb import NegotiateCredentialFactory
NegotiateCredentialFactory # pacify pyflakes
except ImportError:
NegotiateCredentialFactory = None
from twext.enterprise.adbapi2 import ConnectionPoolClient
from txdav.base.datastore.dbapiclient import DBAPIConnector, OracleConnector
from txdav.base.datastore.dbapiclient import postgresPreflight
from txdav.base.datastore.subpostgres import PostgresService
from calendarserver.accesslog import DirectoryLogWrapperResource
from calendarserver.provision.root import RootResource
from calendarserver.tools.util import checkDirectory
from calendarserver.webadmin.landing import WebAdminLandingResource
from calendarserver.webcal.resource import WebCalendarResource
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.sql import CommonDataStore as CommonSQLDataStore
from txdav.common.datastore.file import CommonDataStore as CommonFileDataStore
from txdav.common.datastore.sql import current_sql_schema
from txdav.common.datastore.upgrade.sql.upgrade import NotAllowedToUpgrade
from twext.python.filepath import CachingFilePath
from urllib import quote
from twisted.python.usage import UsageError
from twext.who.checker import UsernamePasswordCredentialChecker
from twext.who.checker import HTTPDigestCredentialChecker
from twisted.cred.error import UnauthorizedLogin
from txweb2.dav.auth import IPrincipalCredentials
log = Logger()
def pgServiceFromConfig(config, subServiceFactory, uid=None, gid=None):
"""
Construct a L{PostgresService} from a given configuration and subservice.
@param config: the configuration to derive postgres configuration
parameters from.
@param subServiceFactory: A factory for the service to start once the
L{PostgresService} has been initialized.
@param uid: The user-ID to run the PostgreSQL server as.
@param gid: The group-ID to run the PostgreSQL server as.
@return: a service which can start postgres.
@rtype: L{PostgresService}
"""
dbRoot = CachingFilePath(config.DatabaseRoot)
# Construct a PostgresService exactly as the parent would, so that we
# can establish connection information.
return PostgresService(
dbRoot, subServiceFactory, current_sql_schema,
databaseName=config.Postgres.DatabaseName,
clusterName=config.Postgres.ClusterName,
logFile=config.Postgres.LogFile,
logDirectory=config.LogRoot if config.Postgres.LogRotation else "",
socketDir=config.Postgres.SocketDirectory,
listenAddresses=config.Postgres.ListenAddresses,
sharedBuffers=config.Postgres.SharedBuffers,
maxConnections=config.Postgres.MaxConnections,
options=config.Postgres.Options,
uid=uid, gid=gid,
spawnedDBUser=config.SpawnedDBUser,
importFileName=config.DBImportFile,
pgCtl=config.Postgres.Ctl,
initDB=config.Postgres.Init,
)
def pgConnectorFromConfig(config):
"""
Create a postgres DB-API connector from the given configuration.
"""
import pgdb
return DBAPIConnector(pgdb, postgresPreflight, config.DSN).connect
def oracleConnectorFromConfig(config):
"""
Create a postgres DB-API connector from the given configuration.
"""
return OracleConnector(config.DSN).connect
class ConnectionWithPeer(Connection):
connected = True
def getPeer(self):
return "<peer: %r %r>" % (self.socket.fileno(), id(self))
def getHost(self):
return "<host: %r %r>" % (self.socket.fileno(), id(self))
def transactionFactoryFromFD(dbampfd, dialect, paramstyle):
"""
Create a transaction factory from an inherited file descriptor, such as one
created by L{ConnectionDispenser}.
"""
skt = fromfd(dbampfd, AF_UNIX, SOCK_STREAM)
os.close(dbampfd)
protocol = ConnectionPoolClient(dialect=dialect, paramstyle=paramstyle)
    transport = ConnectionWithPeer(skt, protocol)
protocol.makeConnection(transport)
transport.startReading()
return protocol.newTransaction
class ConnectionDispenser(object):
"""
A L{ConnectionDispenser} can dispense already-connected file descriptors,
for use with subprocess spawning.
"""
# Very long term FIXME: this mechanism should ideally be eliminated, by
    # making all subprocesses have a single stdio AMP connection that
# multiplexes between multiple protocols.
def __init__(self, connectionPool):
self.pool = connectionPool
def dispense(self):
"""
Dispense a socket object, already connected to a server, for a client
in a subprocess.
"""
# FIXME: these sockets need to be re-dispensed when the process is
# respawned, and they currently won't be.
c, s = socketpair(AF_UNIX, SOCK_STREAM)
protocol = ConnectionPoolConnection(self.pool)
transport = ConnectionWithPeer(s, protocol)
protocol.makeConnection(transport)
transport.startReading()
return c
def storeFromConfig(config, txnFactory, directoryService):
"""
Produce an L{IDataStore} from the given configuration, transaction factory,
and notifier factory.
I
|
pbmanis/acq4
|
acq4/devices/PatchPipette/__init__.py
|
Python
|
mit
| 77
| 0
|
from __future__ import print_function
from .patchpipette import PatchPipette
|
ahmedaljazzar/edx-platform
|
common/djangoapps/util/models.py
|
Python
|
agpl-3.0
| 1,962
| 0.001019
|
"""Models for the ut
|
il app. """
import cStringIO
import gzip
import logging
from config_models.models import ConfigurationModel
from django.db import models
from django.utils.text import compress_string
from opaque_keys.edx.django.models import CreatorMixin
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RateLimitConfiguration(ConfigurationModel):
"""Configuration flag to enable/disable rate limiting.
Applies to Django Rest Framework views.
This is useful for disabling rate limiting for performance tests.
When enabled, it will disable rate limiting on any view decorated
with the `can_disable_rate_limit` class decorator.
"""
class Meta(ConfigurationModel.Meta):
app_label = "util"
def decompress_string(value):
"""
Helper function to reverse CompressedTextField.get_prep_value.
"""
try:
val = value.encode('utf').decode('base64')
zbuf = cStringIO.StringIO(val)
zfile = gzip.GzipFile(fileobj=zbuf)
ret = zfile.read()
zfile.close()
except Exception as e:
logger.error('String decompression failed. There may be corrupted data in the database: %s', e)
ret = value
return ret
class CompressedTextField(CreatorMixin, models.TextField):
""" TextField that transparently compresses data when saving to the database, and decompresses the data
when retrieving it from the database. """
def get_prep_value(self, value):
""" Compress the text data. """
if value is not None:
if isinstance(value, unicode):
value = value.encode('utf8')
value = compress_string(value)
value = value.encode('base64').decode('utf8')
return value
def to_python(self, value):
""" Decompresses the value from the database. """
if isinstance(value, unicode):
value = decompress_string(value)
return value
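# --- Editor-added illustrative sketch; not part of the original module. ---
# Round trip of the storage format used above, assuming Python 2 semantics
# (cStringIO, str.encode('base64')) as in the original code: compress_string()
# gzips the text, the result is base64-encoded for storage, and
# decompress_string() reverses both steps when reading rows back.
def _compression_roundtrip_example(text=u'some long block of text'):
    packed = compress_string(text.encode('utf8')).encode('base64').decode('utf8')
    return decompress_string(packed) == text.encode('utf8')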
|
erjac77/ansible-module-f5bigip
|
library/f5bigip_ltm_profile_sip.py
|
Python
|
apache-2.0
| 8,676
| 0.003112
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_sip
short_description: BIG-IP ltm profile sip module
description:
- Configures a Session Initiation Protocol (SIP) profile.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
alg_enable:
description:
- Enables or disables the SIP ALG (Application Level Gateway) feature.
default: disabled
choices: ['disabled', 'enabled']
app_service:
description:
- Specifies the name of the application service to which the object belongs.
community:
description:
- Specifies the community to which you want to assign the virtual server that you associate with this
profile.
defaults_from:
description:
- Specifies the profile that you want to use as the parent profile.
default: sip
description:
description:
- User defined description.
dialog_aware:
description:
- Enables or disables the ability for the system to be aware of unauthorized use of the SIP dialog.
default: disabled
choices: ['disabled', 'enabled']
dialog_establishment_timeout:
description:
- Indicates the timeout value for dialog establishment in a sip session.
default: 10
enable_sip_firewall:
description:
- Indicates whether to enable SIP firewall functionality or not.
default: no
choices: ['no', 'yes']
insert_record_route_header:
description:
- Enables or disables the insertion of a Record-Route header, which indicates the next hop for the following
SIP request messages.
default: disabled
choices: ['disabled', 'enabled']
insert_via_header:
description:
- Enables or disables the insertion of a Via header, which indicates where the message originated.
default: disabled
choices: ['disabled', 'enabled']
log_profile:
description:
- Specify the name of the ALG log profile which controls the logging of ALG .
log_publisher:
description:
- Specify the name of the log publisher which logs translation events.
max_media_sessions:
description:
- Indicates the maximum number of SDP media sessions that the BIG-IP system accepts.
default: 6
max_registrations:
description:
- Indicates the maximum number of registrations, the maximum allowable REGISTER messages can be recorded
that the BIG-IP system accepts.
default: 100
max_sessions_per_registration:
description:
- Indicates the maximum number of calls or sessions can be made by a user for a single registration that the
BIG-IP system accepts.
default: 50
max_size:
description:
- Specifies the maximum SIP message size that the BIG-IP system accepts.
default: 65535
name:
description:
- Specifies a unique name for the component.
required: true
partition:
description:
- Displays the administrative partition within which the component resides.
registration_timeout:
description:
- Indicates the timeout value for a sip registration.
default: 3600
rtp_proxy_style:
description:
- Indicates the style in which the RTP will proxy the data.
default: symmetric
choices: ['symmetric', 'restricted-by-ip-address', 'any-location']
secure_via_header:
description:
- Enables or disables the insertion of a Secure Via header, which indicates where the message originated.
default: disabled
choices: ['disabled', 'enabled']
security:
description:
- Enables or disables security for the SIP profile.
default: disabled
choices: ['disabled', 'enabled']
sip_session_timeout:
description:
- Indicates the timeout value for a sip session.
default: 300
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
terminate_on_bye:
description:
- Enables or disables the termination of a connection when a BYE transaction finishes.
default: enabled
choices: ['disabled', 'enabled']
user_via_header:
description:
- Enables or disables the insertion of a Via header specified by a system administrator.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Profile sip
f5bigip_ltm_profile_sip:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_sip_profile
partition: Common
description: My sip profile
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_POLAR_CHOICES
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
alg_enable=dict(type='str', choices=F5_ACTIVATION_CHOICES),
app_service=dict(type='str'),
community=dict(type='str'),
            defaults_from=dict(type='str'),
description=dict(type='str'),
dialog_aware=dict(type='str', choices=F5_ACTIVATION_CHOICES),
dialog_establishment_timeout=dict(type='int'),
enable_sip_firewall=dict(type='str', choices=F5_POLAR_CHOICES),
            insert_record_route_header=dict(type='str', choices=F5_ACTIVATION_CHOICES),
insert_via_header=dict(type='str', choices=F5_ACTIVATION_CHOICES),
log_profile=dict(type='str'),
log_publisher=dict(type='str'),
max_media_sessions=dict(type='int'),
max_registrations=dict(type='int'),
max_sessions_per_registration=dict(type='int'),
max_size=dict(type='int'),
registration_timeout=dict(type='int'),
rtp_proxy_style=dict(type='str', choices=['symmetric', 'restricted-by-ip-address', 'any-location']),
secure_via_header=dict(type='str', choices=F5_ACTIVATION_CHOICES),
security=dict(type='str', choices=F5_ACTIVATION_CHOICES),
sip_session_timeout=dict(type='int'),
terminate_on_bye=dict(type='str', choices=F5_ACTIVATION_CHOICES),
user_via_header=dict(type='str')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmProfileSip(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.profile.sips.sip.create,
'read': self._api.tm.ltm.profile.sips.sip.load,
'update': self._api.tm.ltm.profile.sips.sip.update,
'delete': self._api.tm.ltm.profile.sips.sip.de
|
2ndQuadrant/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_auth.py
|
Python
|
gpl-3.0
| 11,230
| 0.002048
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_auth
short_description: "Module to manage authentication to oVirt/RHV"
author: "Ondra Machacek (@machacekondra)"
version_added: "2.2"
description:
- "This module authenticates to oVirt/RHV engine and creates SSO token, which should be later used in
all other oVirt/RHV modules, so all modules don't need to perform login and logout.
This module returns an Ansible fact called I(ovirt_auth). Every module can use this
fact as C(auth) parameter, to perform authentication."
options:
state:
default: present
choices: ['present', 'absent']
description:
- "Specifies if a token should be created or revoked."
username:
required: False
description:
- "The name of the user. For example: I(admin@internal)
Default value is set by I(OVIRT_USERNAME) environment variable."
password:
required: False
description:
- "The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
token:
required: False
description:
- "SSO token to be used instead of login with username/password.
Default value is set by I(OVIRT_TOKEN) environment variable."
version_added: 2.5
url:
required: False
description:
- "A string containing the API URL of the server.
                For example: I(https://server.example.com/ovirt-engine/api).
                Default value is set by I(OVIRT_URL) environment variable."
- "Either C(url) or C(hostname) is required."
hostname:
required: False
description:
- "A string containing the hostname of the server.
For example: I(server.example.com).
                Default value is set by I(OVIRT_HOSTNAME) environment variable."
- "Either C(url) or C(hostname) is required."
version_added: "2.6"
insecure:
required: False
description:
- "A boolean flag that indicates if the server TLS certificate and host name should be checked."
type: bool
ca_file:
required: False
description:
- "A PEM file containing the trusted CA certificates. The
certificate presented by the server will be verified using these CA
certificates. If C(ca_file) parameter is not set, system wide
CA certificate store is used.
Default value is set by I(OVIRT_CAFILE) environment variable."
timeout:
required: False
description:
- "The maximum total time to wait for the response, in
seconds. A value of zero (the default) means wait forever. If
the timeout expires before the response is received an exception
will be raised."
compress:
required: False
description:
- "A boolean flag indicating if the SDK should ask
the server to send compressed responses. The default is I(True).
Note that this is a hint for the server, and that it may return
uncompressed data even when this parameter is set to I(True)."
type: bool
kerberos:
required: False
description:
- "A boolean flag indicating if Kerberos authentication
should be used instead of the default basic authentication."
type: bool
headers:
required: False
description:
- "A dictionary of HTTP headers to be added to each API call."
version_added: "2.4"
requirements:
- python >= 2.7
- ovirt-engine-sdk-python >= 4.3.0
notes:
- "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket,
when you no longer need it, otherwise the ticket would be revoked by engine when it expires.
For an example of how to achieve that, please take a look at I(examples) section."
- "In order to use this module you have to install oVirt/RHV Python SDK.
To ensure it's installed with correct version you can create the following task:
I(pip: name=ovirt-engine-sdk-python version=4.3.0)"
- "Note that in oVirt/RHV 4.1 if you want to use a user which is not administrator
you must enable the I(ENGINE_API_FILTER_BY_DEFAULT) variable in engine. In
oVirt/RHV 4.2 and later it's enabled by default."
'''
EXAMPLES = '''
- block:
# Create a vault with `ovirt_password` variable which store your
# oVirt/RHV user's password, and include that yaml file with variable:
- include_vars: ovirt_password.yml
- name: Obtain SSO token with using username/password credentials
ovirt_auth:
url: https://ovirt.example.com/ovirt-engine/api
username: admin@internal
ca_file: ca.pem
password: "{{ ovirt_password }}"
# Previous task generated I(ovirt_auth) fact, which you can later use
# in different modules as follows:
- ovirt_vm:
auth: "{{ ovirt_auth }}"
state: absent
name: myvm
always:
- name: Always revoke the SSO token
ovirt_auth:
state: absent
ovirt_auth: "{{ ovirt_auth }}"
# When user will set following environment variables:
# OVIRT_URL = https://fqdn/ovirt-engine/api
# OVIRT_USERNAME = admin@internal
# OVIRT_PASSWORD = the_password
# User can login the oVirt using environment variable instead of variables
# in yaml file.
# This is mainly useful when using Ansible Tower or AWX, as it will work
# for Red Hat Virtualization credentials type.
- name: Obtain SSO token
ovirt_auth:
state: present
'''
RETURN = '''
ovirt_auth:
description: Authentication facts, needed to perform authentication to oVirt/RHV.
returned: success
type: complex
contains:
token:
description: SSO token which is used for connection to oVirt/RHV engine.
returned: success
type: str
sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw"
url:
description: URL of the oVirt/RHV engine API endpoint.
returned: success
type: str
sample: "https://ovirt.example.com/ovirt-engine/api"
ca_file:
description: CA file, which is used to verify SSL/TLS connection.
returned: success
type: str
sample: "ca.pem"
insecure:
description: Flag indicating if insecure connection is used.
returned: success
type: bool
sample: False
timeout:
description: Number of seconds to wait for response.
returned: success
type: int
sample: 0
compress:
description: Flag indicating if compression is used for connection.
returned: success
type: bool
sample: True
kerberos:
description: Flag indicating if kerberos is used for authentication.
returned: success
type: bool
sample: False
headers:
description: Dictionary of HTTP headers to be added to each API call.
returned: success
type: dict
'''
import os
import traceback
try:
import ovirtsdk4 as sdk
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import check_sdk
def main():
module = AnsibleModule(
argument_spec=dict(
url=dict(default=None),
hostname=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
ca_file=dict(default=None, type='pa
|
davidveen/nolava
|
src/main.py
|
Python
|
gpl-3.0
| 234
| 0.004274
|
"""
Application entry point
"""
def main():
pass
if __name__ == "__ma
|
in__":
# delegates to main_debug during construction
try:
import main_debug
main_debug.main()
    except ImportError:
main()
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/social/backends/xing.py
|
Python
|
agpl-3.0
| 1,519
| 0
|
"""
XING OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/xing.html
"""
from social.backends.oauth import BaseOAuth1
class XingOAuth(BaseOAuth1):
"""Xing OAuth authentication backend"""
name = 'xing'
AUTHORIZATION_URL = 'https://api.xing.com/v1/authorize'
REQUEST_TOKEN_URL = 'https://api.xing.com/v1/request_token'
ACCESS_TOKEN_URL = 'https://api.xing.com/v1/access_token'
    SCOPE_SEPARATOR = '+'
EXTRA_DATA = [
('id', 'id'),
('user_id', 'user_id')
]
def get_user_details(self, response):
"""Return user details from Xing account"""
        email = response.get('email', '')
fullname, first_name, last_name = self.get_user_names(
first_name=response['first_name'],
last_name=response['last_name']
)
return {'username': first_name + last_name,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name,
'email': email}
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
profile = self.get_json(
'https://api.xing.com/v1/users/me.json',
auth=self.oauth_auth(access_token)
)['users'][0]
return {
'user_id': profile['id'],
'id': profile['id'],
'first_name': profile['first_name'],
'last_name': profile['last_name'],
'email': profile['active_email']
}
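# --- Editor-added illustrative note; not part of the original backend. ---
# Given a typical XING profile payload, get_user_details() above maps it to
# the common social-auth detail dict, e.g. (values purely hypothetical):
#
#   {'first_name': 'Jane', 'last_name': 'Doe', 'email': 'jane@example.com'}
#       -> {'username': 'JaneDoe', 'fullname': 'Jane Doe',
#           'first_name': 'Jane', 'last_name': 'Doe',
#           'email': 'jane@example.com'}
#
# The fullname/first/last handling is delegated to get_user_names() on the
# shared OAuth base class.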
|
NLeSC/Xenon-examples
|
readthedocs/code-tabs/python/tests/test_slurm_queues_getter_with_props.py
|
Python
|
apache-2.0
| 192
| 0.005208
|
#!/usr/bin/env python
import pytest
from pyxenon_snippets import slurm_queues_getter_with_props
def test_slurm_queues_getter_with_props():
slurm_queues_getter_with_props.run_example()
|
homeworkprod/byceps
|
byceps/services/shop/article/dbmodels/article.py
|
Python
|
bsd-3-clause
| 3,124
| 0.00128
|
"""
byceps.services.shop.article.dbmodels.article
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from decimal import Decimal
from typing import Optional
from sqlalchemy.ext.hybrid import hybrid_property
from .....database import db, generate_uuid
from .....util.instances import ReprBuilder
from ...shop.transfer.models import ShopID
from ..transfer.models import ArticleNumber, ArticleType
class Article(db.Model):
"""An article that can be bought."""
__tablename__ = 'shop_articles'
__table_args__ = (
db.UniqueConstraint('shop_id', 'description'),
db.CheckConstraint('available_from < available_until'),
)
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
shop_id = db.Column(db.UnicodeText, db.ForeignKey('shops.id'), index=True, nullable=False)
item_number = db.Column(db.UnicodeText, unique=True, nullable=False)
_type = db.Column('type', db.UnicodeText, nullable=False)
description = db.Column(db.UnicodeText, nullable=False)
price = db.Column(db.Numeric(6, 2), nullable=False)
    tax_rate = db.Column(db.Numeric(3, 3), nullable=False)
available_from = db.Column(db.DateTime, nullable=True)
    available_until = db.Column(db.DateTime, nullable=True)
total_quantity = db.Column(db.Integer, nullable=False)
quantity = db.Column(db.Integer, db.CheckConstraint('quantity >= 0'), nullable=False)
max_quantity_per_order = db.Column(db.Integer, nullable=False)
not_directly_orderable = db.Column(db.Boolean, default=False, nullable=False)
separate_order_required = db.Column(db.Boolean, default=False, nullable=False)
shipping_required = db.Column(db.Boolean, nullable=False)
def __init__(
self,
shop_id: ShopID,
item_number: ArticleNumber,
type_: ArticleType,
description: str,
price: Decimal,
tax_rate: Decimal,
total_quantity: int,
max_quantity_per_order: int,
shipping_required: bool,
*,
available_from: Optional[datetime] = None,
available_until: Optional[datetime] = None,
) -> None:
self.shop_id = shop_id
self.item_number = item_number
self._type = type_.name
self.description = description
self.price = price
self.tax_rate = tax_rate
self.available_from = available_from
self.available_until = available_until
self.total_quantity = total_quantity
self.quantity = total_quantity # Initialize with total quantity.
self.max_quantity_per_order = max_quantity_per_order
self.shipping_required = shipping_required
@hybrid_property
def type_(self) -> ArticleType:
return ArticleType[self._type]
def __repr__(self) -> str:
return ReprBuilder(self) \
.add_with_lookup('id') \
.add('shop', self.shop_id) \
.add_with_lookup('item_number') \
.add_with_lookup('description') \
.build()
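# --- Editor-added usage sketch; not part of the original module. ---
# Constructing an Article seeds `quantity` from `total_quantity`, as noted in
# __init__ above. Roughly (the ArticleType member below is hypothetical):
#
#   article = Article(
#       shop_id=ShopID('acme-2021'),
#       item_number=ArticleNumber('ACME-00001'),
#       type_=ArticleType.physical,          # hypothetical member name
#       description='Conference T-shirt',
#       price=Decimal('19.99'),
#       tax_rate=Decimal('0.19'),
#       total_quantity=100,
#       max_quantity_per_order=5,
#       shipping_required=True,
#   )
#   assert article.quantity == 100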
|
idaholab/raven
|
framework/SupervisedLearning/ScikitLearn/LinearModel/LassoLarsCV.py
|
Python
|
apache-2.0
| 6,473
| 0.00896
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
Cross-validated Lasso, using the LARS algorithm.
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
from numpy import finfo
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class LassoLarsCV(ScikitLearnBase):
"""
Cross-validated Lasso, using the LARS algorithm
"""
info = {'problemtype':'regression', 'normalize':False}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.linear_model
self.model = sklearn.linear_model.LassoLarsCV
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(LassoLarsCV, cls).getInputSpecification()
specs.description = r"""The \xmlNode{LassoLarsCV} (\textit{Cross-validated Lasso model fit with Least Angle Regression})
                        This model is an augmentation of the LassoLars model with the addition of
                        cross-validation techniques.
The optimization objective for Lasso is:
\begin{equation}
(1 / (2 * n\_samples)) * ||y - Xw||^2\_2 + alpha * ||w||\_1
\end{equation}
\zNormalizationNotPerformed{LassoLarsCV}
"""
specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType,
                                                 descr=r"""Whether the intercept should be estimated or not. If False,
the data is assumed to be already centered.""", default=True))
specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType,
descr=r"""The maximum number of iterations.""", default=500))
specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType,
descr=r"""This parameter is ignored when fit_intercept is set to False. If True,
the regressors X will be normalized before regression by subtracting the mean and
dividing by the l2-norm.""", default=True))
specs.addSub(InputData.parameterInputFactory("precompute", contentType=InputTypes.StringType,
descr=r"""Whether to use a precomputed Gram matrix to speed up calculations.
For sparse input this option is always True to preserve sparsity.""", default='auto'))
specs.addSub(InputData.parameterInputFactory("max_n_alphas", contentType=InputTypes.IntegerType,
descr=r"""The maximum number of points on the path used to compute the residuals in
the cross-validation""", default=1000))
specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType,
descr=r"""The machine-precision regularization in the computation of the Cholesky
diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol
parameter in some iterative optimization-based algorithms, this parameter does not
control the tolerance of the optimization.""", default=finfo(float).eps))
specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType,
descr=r"""When set to True, forces the coefficients to be positive.""", default=False))
specs.addSub(InputData.parameterInputFactory("cv", contentType=InputTypes.IntegerType,
descr=r"""Determines the cross-validation splitting strategy.
It specifies the number of folds..""", default=None))
specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType,
descr=r"""Amount of verbosity.""", default=False))
return specs
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['fit_intercept','max_iter', 'normalize', 'precompute',
'max_n_alphas','eps','positive','cv', 'verbose'])
# notFound must be empty
assert(not notFound)
self.initializeModel(settings)
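# --- Editor-added illustrative sketch; not part of the RAVEN source. ---
# The class above forwards the parsed XML settings to scikit-learn's
# LassoLarsCV estimator; standalone, the wrapped model behaves like this:
def _lasso_lars_cv_demo():
    import numpy as np
    from sklearn.linear_model import LassoLarsCV as _SklLassoLarsCV
    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 5))
    y = 3.0 * X[:, 0] - 2.0 * X[:, 1] + rng.normal(scale=0.1, size=200)
    model = _SklLassoLarsCV(cv=5, max_iter=500).fit(X, y)
    # alpha_ is the cross-validated regularization strength, coef_ the weights
    return model.alpha_, model.coef_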
|
ericmjl/bokeh
|
examples/reference/models/Triangle.py
|
Python
|
bsd-3-clause
| 749
| 0.001335
|
import numpy as np
from bokeh.io import curdoc, show
from bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Triangle
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))
plot = Plot(
title=None, plot_width=300, plot_height=300,
min_border=0, toolbar_location=None)
glyph = Triangle(x="x", y="y", size="sizes", line_color="#99d594", line_width=2, fill_color=None)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
|
krazybean/message_agent_abandoned
|
lin/lin_notify_lib.py
|
Python
|
apache-2.0
| 282
| 0.01773
|
#!/usr/bin/env python
import pynotify
'''
No purpose here other than creating a callable library for system notifications
'''
class message:
    def __init__(self, messagex):
pynotify.init('EventCall')
m = pynotify.Notification("RSEvent Notification", "%s" % messagex)
m.show()
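# --- Editor-added usage note; not part of the original file. ---
# Each instantiation initialises pynotify and shows one notification bubble:
#
#   message("RS event received")
#
# so callers fire a desktop notification with a single constructor call.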
|
tiborsimko/invenio-records
|
invenio_records/admin.py
|
Python
|
mit
| 2,051
| 0.000488
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Admin model views for records."""
import json
from flask import flash
from flask_admin.contrib.sqla import ModelView
from flask_babelex import gettext as _
from invenio_admin.filters import FilterConverter
from invenio_db import db
from markupsafe import Markup
from sqlalchemy.exc import SQLAlchemyError
from .api import Record
from .models import RecordMetadata
class RecordMetadataModelView(ModelView):
"""Records admin model view."""
filter_converter = FilterConverter()
can_create = False
can_edit = False
can_delete = True
can_view_details = True
column_list = ('id', 'version_id', 'updated', 'created',)
column_details_list = ('id', 'version_id', 'updated', 'created', 'json')
column_labels = dict(
id=_('UUID'),
version_id=_('Revision'),
json=_('JSON'),
)
column_formatters = dict(
version_id=lambda v, c, m, p: m.version_id-1,
json=lambda v, c, m, p: Markup("<pre>{0}</pre>".format(
            json.dumps(m.json, indent=2, sort_keys=True)))
)
column_filters = ('created', 'updated', )
column_default_sort = ('updated', True)
page_size = 25
def delete_model(self, model):
"""Delete a record."
|
""
try:
if model.json is None:
return True
record = Record(model.json, model=model)
record.delete()
db.session.commit()
except SQLAlchemyError as e:
if not self.handle_view_exception(e):
flash(_('Failed to delete record. %(error)s', error=str(e)),
category='error')
db.session.rollback()
return False
return True
record_adminview = dict(
modelview=RecordMetadataModelView,
model=RecordMetadata,
category=_('Records'))
|
zmathe/WebAppDIRAC
|
WebApp/handler/Palette.py
|
Python
|
gpl-3.0
| 3,496
| 0.016304
|
import hashlib as md5
class Palette:
def __init__(self, palette={}, colors=[]):
self.job_status_palette = {
'Received': '#D9E7F8',
'Checking': '#FAFAFA',
'Staging': '#6190CD',
'Waiting': '#004EFF',
'Matched': '#FEF7AA',
'Running': '#FDEE65',
'Stalled': '#BC5757',
'Completed': '#00FF21',
'Done': '#238802',
'Failed': '#FF0000',
'failed': '#FF0000',
'Killed': '#111111'
}
self.job_minor_status_palette = {
"AncestorDepth Not Found" : '#BAA312',
'Application Finished With Errors' : '#BC2133',
'BK Input Data Not Available' : '#E6D600',
'BK-LFC Integrity Check Failed' : '#BC1143',
'Can not get Active and Banned Sites from JobDB' : '#84CBFF',
'Chosen site is not eligible' : '#B4A243',
'Error Sending Staging Request' : '#B4A243',
'Exceeded Maximum Dataset Limit (100)' : '#BA5C9D',
'Exception During Execution' : '#AA240C',
'Execution Complete' : '#338B39',
'Failed to access database' : '#FFE267',
'File Catalog Access Failure' : '#FF8000',
'Illegal Job JDL' : '#D96C00',
'Impossible Site + InputData Requirement' : '#BDA822',
'Impossible Site Requirement' : '#F87500',
'Input Data Not Available' : '#2822A6',
'Input Data Resolution' : '#FFBE94',
'Input Sandbox Download' : '#586CFF',
'Input data contains //' : '#AB7800',
'Input data not correctly specified' : '#6812D6',
'Job Wrapper Initialization' : '#FFFFCC',
'Job has exceeded maximum wall clock time' : '#FF33CC',
'Job has insufficient disk space to continue' : '#33FFCC',
'Job has reached the CPU limit of the queue' : '#AABBCC',
'No Ancestors Found For Input Data' : '#BDA544',
'No candidate sites available' : '#E2FFBC',
'No eligible sites for job' : '#A8D511',
'Parameter not found' : '#FFB80C',
      'Pending Requests' : '#52FF4F',
'Received Kill signal' : '#FF312F',
'Socket read timeout exceeded' : '#B400FE',
'Stalled' : '#FF655E',
'Uploading Job Outputs' : '#FE8420',
'Watchdog identified this job as stalled' : '#FFCC99'
}
self.miscelaneous_pallette = {
'Others': '#666666',
'NoLabels': '#0025AD',
'Total': '#00FFDC',
'Default': '#FDEE65'
}
self.country_palette = {
'France':'#73C6BC',
'UK':'#DCAF8A',
'Spain':'#C2B0E1',
'Netherlands':'#A9BF8E',
'Germany':'#800000',
'Russia':'#00514A',
'Italy':'#004F00',
'Switzerland':'#433B00',
'Poland':'#528220',
'Hungary':'#825CE2',
'Portugal':'#009182',
'Turkey':'#B85D00'
}
self.palette = self.country_palette
self.palette.update(self.job_status_palette)
self.palette.update(self.miscelaneous_pallette)
self.palette.update(self.job_minor_status_palette)
def setPalette(self, palette):
self.palette = palette
def setColor(self, label, color):
self.palette[label] = color
def addPalette(self, palette):
self.palette.update(palette)
def getColor(self, label):
if label in self.palette.keys():
return self.palette[label]
else:
return self.generateColor(label)
def generateColor(self, label):
if label == None: label = str(label)
myMD5 = md5.md5()
myMD5.update(str(label))
hexstring = myMD5.hexdigest()
color = "#" + hexstring[:6]
return color
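# --- Editor-added illustrative sketch; not part of the original module. ---
# getColor() falls back to generateColor(), which derives a stable colour from
# the first six hex digits of the label's MD5 digest. The same idea standalone
# (Python 2 semantics, matching the module above):
def _label_to_color(label):
    import hashlib
    return '#' + hashlib.md5(str(label)).hexdigest()[:6]

# Known labels come straight from the palettes, e.g. getColor('Running') is
# '#FDEE65', while an unseen label gets a deterministic hash-derived colour.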
|
tanglu-org/tgl-misago
|
misago/migrations/0015_remove_users_reported.py
|
Python
|
gpl-3.0
| 33,856
| 0.008034
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
orm.MonitorItem.objects.filter(pk='users_reported').delete()
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'misago.alert': {
'Meta': {'object_name': 'Alert'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']"}),
'variables': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'misago.ban': {
'Meta': {'object_name': 'Ban'},
'ban': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason_admin': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reason_user': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'test': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'misago.change': {
'Meta': {'object_name': 'Change'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'change': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Post']"}),
'post_content': ('django.db.models.fields.TextField', [], {}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
'thread_name_new': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'thread_name_old': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.checkpoint': {
'Meta': {'object_name': 'Checkpoint'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'agent': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'old_forum': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['misago.Forum']"}),
'old_forum_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'old_forum_slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}),
'target_user_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target_user_slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.fixture': {
'Meta': {'object_name': 'Fixture'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'misago.forum': {
'Meta': {'object_name': 'Forum'},
'attrs': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_preparsed': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_poster': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}),
'last_poster_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_poster_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_poster_style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_thread': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Thread']"}),
'last_thread_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_thread_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_thread_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['misago.Forum']"}),
'posts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'posts_delta': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'prune_last': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'prune_start': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pruned_archive': ('django.db.models.fie
tonioo/modoboa | modoboa/lib/db_utils.py | Python | isc | 848 | 0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import connection
from django.utils.translation import ugettext as _
from modoboa.lib.exceptions import InternalError
def db_table_exists(table):
"""Check if table exists."""
return table in connection.introspection.table_names()
def db_type(cname="default"):
"""Return the type of the *default* database
Supported values : 'postgres', 'mysql', 'sqlite'
:param str cname: connection name
:return: a string or None
"""
if cname not in settings.DATABASES:
raise InternalError(
_("Connection to database %s not configured" % cname))
for t in ["postgres", "mysql", "sqlite"]:
if settings.DATABASES[cname]["ENGINE"].find(t) != -1:
return t
return None
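# --- Illustrative usage sketch (not part of the original module) ---
# Shows how db_type() and db_table_exists() are typically combined; it assumes Django
# settings are already configured, and the table name below is purely hypothetical.
def _example_backend_check():
    engine = db_type("default")  # "postgres", "mysql", "sqlite" or None
    if engine is not None and not db_table_exists("lib_parameter"):
        return "table 'lib_parameter' is missing on the %s backend" % engine
    return "ok"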
lmcro/webserver | qa/014-Broken-Key3.py | Python | gpl-2.0 | 294 | 0.013605
from base import *
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Broken header entry III"
self.expected_error = 200
self.request = "GET / HTTP/1.
|
0\r\n" +\
"Entry:value\r\n"
mcltn/ansible | lib/ansible/inventory/__init__.py | Python | gpl-3.0 | 28,850 | 0.00357
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import sys
import re
import stat
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.dir import InventoryDirectory, get_file_parser
from ansible.inventory.group import Group
from ansible.inventory.host import Host
from ansible.plugins import vars_loader
from ansible.utils.vars import combine_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Inventory(object):
"""
Host inventory for ansible.
"""
def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
self._loader = loader
self._variable_manager = variable_manager
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
self._vars_per_host = {}
self._vars_per_group = {}
self._hosts_cache = {}
self._groups_list = {}
self._pattern_cache = {}
self._vars_plugins = []
self._groups_cache = {}
# to be set by calling set_playbook_basedir by playbook code
self._playbook_basedir = None
# the inventory object holds a list of groups
self.groups = []
# a list of host(names) to contain current inquiries to
self._restriction = None
self._subset = None
self.parse_inventory(host_list)
def parse_inventory(self, host_list):
if isinstance(host_list, basestring):
if "," in host_list:
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
if host_list is None:
self.parser = None
elif isinstance(host_list, list):
self.parser = None
all = Group('all')
self.groups = [ all ]
ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
for x in host_list:
m = ipv6_re.match(x)
if m:
all.add_host(Host(m.groups()[0], m.groups()[1]))
else:
if ":" in x:
tokens = x.rsplit(":", 1)
# if there is ':' in the address, then this is an ipv6
if ':' in tokens[0]:
all.add_host(Host(x))
else:
all.add_host(Host(tokens[0], tokens[1]))
else:
all.add_host(Host(x))
elif os.path.exists(host_list):
#TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
else:
self.parser = get_file_parser(host_list, self._loader)
vars_loader.add_directory(self.basedir(), with_subdir=True)
if self.parser:
self.groups = self.parser.groups.values()
else:
# should never happen, but JIC
raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
self._vars_plugins = [ x for x in vars_loader.all(self) ]
# FIXME: shouldn't be required, since the group/host vars file
# management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
for group in self.groups:
group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
def _match(self, str, pattern_str):
try:
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
except Exception as e:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
def _match_list(self, items, item_attr, pattern_str):
results = []
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception as e:
raise AnsibleError('invalid host pattern: %s' % pattern_str)
for item in items:
if pattern.match(getattr(item, item_attr)):
results.append(item)
return results
def _split_pattern(self, pattern):
"""
takes e.g. "webservers[0:5]:dbservers:others"
and returns ["webservers[0:5]", "dbservers", "others"]
"""
term = re.compile(
r'''(?: # We want to match something comprising:
[^:\[\]] # (anything other than ':', '[', or ']'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)* # repeated as many times as possible
''', re.X
)
return [x for x in term.findall(pattern) if x]
def get_hosts(self, pattern="all", ignore_limits_and_restrictions=False):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
# Enumerate all hosts matching the given pattern (which may be
# either a list of patterns or a string like 'pat1:pat2').
if isinstance(pattern, list):
pattern = ':'.join(pattern)
if ';' in pattern or ',' in pattern:
display.deprecated("Use ':' instead of ',' or ';' to separate host patterns", version=2.0, removed=True)
patterns = self._split_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits_and_restrictions:
# exclude hosts not in a subset, if defined
if self._subset:
subset = self._evaluate_patterns(self._subset)
hosts = [ h for h in hosts if h in subset ]
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
hosts = [ h for h in hosts if h in self._restriction ]
return hosts
bizalu/sImulAcre | core/lib/speech_recognition/__init__.py | Python | gpl-2.0 | 10,485 | 0.010205
"""Library for performing speech recognition with the Google Speech Recognition API."""
__author__ = 'Anthony Zhang (Uberi)'
__version__ = '1.0.4'
__license__ = 'BSD'
import io, subprocess, wave, shutil
import math, audioop, collections
import json, urllib.request
#wip: filter out clicks and other too short parts
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
try:
import pyaudio
class Microphone(AudioSource):
def __init__(self, device_index = None):
self.device_index = device_index
self.format = pyaudio.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = pyaudio.get_sample_size(self.format)
self.RATE = 16000 # sampling rate in Hertz
self.CHANNELS = 1 # mono audio
self.CHUNK = 1024 # number of frames stored in each buffer
self.audio = None
self.stream = None
def __enter__(self):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(
input_device_index = self.device_index,
format = self.format, rate = self.RATE, channels = self.CHANNELS, frames_per_buffer = self.CHUNK,
input = True, # stream is an input stream
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio.terminate()
except ImportError:
pass
class WavFile(AudioSource):
def __init__(self, filename_or_fileobject):
if isinstance(filename_or_fileobject, str):
self.filename = filename_or_fileobject
else:
self.filename = None
self.wav_file = filename_or_fileobject
self.stream = None
def __enter__(self):
if self.filename: self.wav_file = open(self.filename, "rb")
self.wav_reader = wave.open(self.wav_file, "rb")
self.SAMPLE_WIDTH = self.wav_reader.getsampwidth()
self.RATE = self.wav_reader.getframerate()
self.CHANNELS = self.wav_reader.getnchannels()
assert self.CHANNELS == 1 # audio must be mono
self.CHUNK = 4096
self.stream = WavFile.WavStream(self.wav_reader)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.filename: self.wav_file.close()
self.stream = None
class WavStream(object):
def __init__(self, wav_reader):
self.wav_reader = wav_reader
def read(self, size = -1):
if size == -1:
return self.wav_reader.readframes(self.wav_reader.getnframes())
return self.wav_reader.readframes(size)
class AudioData(object):
def __init__(self, rate, data):
self.rate = rate
self.data = data
class Recognizer(AudioSource):
def __init__(self, language = "fr-FR", key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"):
self.key = key
self.language = language
self.energy_threshold = 1500 # minimum audio energy to consider for recording
self.pause_threshold = 0.8 # seconds of quiet time before a phrase is considered complete
self.quiet_duration = 0.5 # amount of quiet time to keep on both sides of the recording
def samples_to_flac(self, source, frame_data):
import platform, os
with io.BytesIO() as wav_file:
with wave.open(wav_file, "wb") as wav_writer:
wav_writer.setsampwidth(source.SAMPLE_WIDTH)
wav_writer.setnchannels(source.CHANNELS)
wav_writer.setframerate(source.RATE)
wav_writer.writeframes(frame_data)
wav_data = wav_file.getvalue()
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
if shutil.which("flac") is not None: # check for installed version first
flac_converter = shutil.which("flac")
elif system == "Windows" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}: # Windows NT, use the bundled FLAC conversion utility
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}:
flac_converter = os.path.join(path, "flac-linux-i386")
else:
raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC utility")
process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
flac_data, stderr = process.communicate(wav_data)
return flac_data
def record(self, source, duration = None):
assert isinstance(source, AudioSource) and source.stream
frames = io.BytesIO()
seconds_per_buffer = source.CHUNK / source.RATE
elapsed_time = 0
while True: # loop for the total number of chunks needed
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
def listen(self, source, timeout = None):
assert isinstance(source, AudioSource) and source.stream
# record audio data as raw samples
frames = collections.deque()
assert self.pause_threshold >= self.quiet_duration >= 0
seconds_per_buffer = source.CHUNK / source.RATE
pause_buffer_count = math.ceil(self.pause_threshold / seconds_per_buffer) # number of buffers of quiet audio before the phrase is complete
quiet_buffer_count = math.ceil(self.quiet_duration / seconds_per_buffer) # maximum number of buffers of quiet audio to retain before and after
elapsed_time = 0
# store audio input until the phrase starts
while True:
# handle timeout if specified
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout:
raise TimeoutError("listening timed out")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has stopped being quiet
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
break
if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers
frames.popleft()
# read audio input until the phrase ends
pause_count = 0
while True:
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
# check if the audio input has gone quiet for longer than the pause threshold
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# obtain frame data
for i in range(quiet_buffer_count, pause_buffer_count): frames.pop() # remove extra quiet frames at the end
frame_data = b"".join(list(frames))
return AudioData(source.RATE, self.samples_to_flac(source, frame_data))
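# --- Illustrative usage sketch (not part of the original module, kept as comments) ---
# Capturing one phrase from the default microphone; it assumes PyAudio is installed so
# that the Microphone class above is available, and simply reuses the __init__ defaults.
#     r = Recognizer(language="en-US")
#     with Microphone() as source:
#         audio = r.listen(source)  # blocks until a phrase followed by a pause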
def recognize(self, audio_data, show_al
aaalgo/cls | train-slim-fcn.py | Python | mit | 13,078 | 0.005811
#!/usr/bin/env python3
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'models/research/slim'))
import time
import datetime
import logging
from tqdm import tqdm
import numpy as np
import cv2
import simplejson as json
from sklearn.metrics import accuracy_score, roc_auc_score
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils #import get_network_fn
import picpac
def patch_arg_scopes ():
def resnet_arg_scope (weight_decay=0.0001):
print_red("Patching resnet_v2 arg_scope when training from scratch")
return resnet_utils.resnet_arg_scope(weight_decay=weight_decay,
batch_norm_decay=0.9,
batch_norm_epsilon=5e-4,
batch_norm_scale=False)
nets_factory.arg_scopes_map['resnet_v1_50'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v1_101'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v1_152'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v1_200'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_50'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_101'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_152'] = resnet_arg_scope
nets_factory.arg_scopes_map['resnet_v2_200'] = resnet_arg_scope
pass
augments = None
#from . config import *
#if os.path.exists('config.py'):
def print_red (txt):
print('\033[91m' + txt + '\033[0m')
def print_green (txt):
print('\033[92m' + txt + '\033[0m')
print(augments)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('db', None, 'training db')
flags.DEFINE_string('val_db', None, 'validation db')
flags.DEFINE_integer('classes', 2, 'number of classes')
flags.DEFINE_string('mixin', None, 'mix-in training db')
flags.DEFINE_integer('size', None, '')
flags.DEFINE_integer('batch', 1, 'Batch size. ')
flags.DEFINE_integer('channels', 3, '')
flags.DEFINE_integer('shift', 0, '')
flags.DEFINE_integer('stride', 16, '')
flags.DEFINE_integer('max_size', 2000, '')
flags.DEFINE_boolean('cache', True, '')
flags.DEFINE_integer('picpac_dump', 0, '')
flags.DEFINE_string('augments', None, 'augment config file')
flags.DEFINE_string('backbone', 'resnet_v2_50', 'architecture')
flags.DEFINE_string('model', None, 'model directory')
flags.DEFINE_string('resume', None, 'resume training from this model')
flags.DEFINE_string('finetune', None, '')
flags.DEFINE_integer('max_to_keep', 100, '')
# optimizer settings
flags.DEFINE_float('lr', 0.01, 'Initial learning rate.')
flags.DEFINE_float('decay_rate', 0.95, '')
flags.DEFINE_float('decay_steps', 500, '')
flags.DEFINE_float('weight_decay', 0.00004, '')
#
flags.DEFINE_integer('epoch_steps', None, '')
flags.DEFINE_integer('max_epochs', 20000, '')
flags.DEFINE_integer('ckpt_epochs', 10, '')
flags.DEFINE_integer('val_epochs', 10, '')
flags.DEFINE_boolean('adam', False, '')
flags.DEFINE_boolean('vgg', False, '')
COLORSPACE = 'BGR'
PIXEL_MEANS = tf.constant([[[[127.0, 127.0, 127.0]]]])
PIXEL_MEANS1 = tf.constant([[[[127.0]]]])
VGG_PIXEL_MEANS = tf.constant([[[[103.94, 116.78, 123.68]]]])
def fcn_loss (logits, labels):
logits = tf.reshape(logits, (-1, FLAGS.classes))
labels = tf.reshape(labels, (-1,))
# cross-entropy
xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
xe = tf.reduce_mean(xe, name='xe')
# accuracy
acc = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32)
acc = tf.reduce_mean(acc, name='acc')
# regularization
reg = tf.reduce_sum(tf.losses.get_regularization_losses())
reg = tf.identity(reg, name='re')
# loss
loss = tf.identity(xe + reg, name='lo')
return loss, [acc, xe, reg, loss]
def setup_finetune (ckpt, exclusions):
print("Finetuning %s" % ckpt)
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
print("Excluding %s" % var.op.name)
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(ckpt):
ckpt = tf.train.latest_checkpoint(ckpt)
variables_to_train = []
for scope in exclusions:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
print("Training %d out of %d variables" % (len(variables_to_train), len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))))
if len(variables_to_train) < 10:
for var in variables_to_train:
print(" %s" % var.op.name)
return slim.assign_from_checkpoint_fn(
ckpt, variables_to_restore,
ignore_missing_vars=False), variables_to_train
def create_picpac_stream (db_path, is_training):
assert os.path.exists(db_path)
augments = []
if is_training:
if FLAGS.augments:
with open(FLAGS.augments, 'r') as f:
augments = json.loads(f.read())
print("Using augments:")
print(json.dumps(augments))
else:
augments = [
#{"type": "augment.flip", "horizontal": True, "vertical": False},
{"type": "augment.rotate", "min":-10, "max":10},
{"type": "augment.scale", "min":0.9, "max":1.1},
{"type": "augment.add", "range":20},
]
else:
augments = []
config = {"db": db_path,
"loop": is_training,
"shuffle": is_training,
"reshuffle": is_training,
"annotate": True,
"channels": FLAGS.channels,
"stratify": is_training,
"dtype": "float32",
"batch": FLAGS.batch,
"colorspace": COLORSPACE,
"cache": FLAGS.cache,
"dump": FLAGS.picpac_dump,
"transforms": augments + [
{"type": "resize", "max_size": FLAGS.max_size},
{"type": "clip", "round": FLAGS.stride},
{"type": "rasterize"},
]
}
if is_training and not FLAGS.mixin is None:
print("mixin support is incomplete in new picpac.")
assert os.path.exists(FLAGS.mixin)
config['mixin'] = FLAGS.mixin
config['mixin_group_reset'] = 0
config['mixin_group_delta'] = 1
pass
return picpac.ImageStream(config)
def main (_):
global PIXEL_MEANS
global PIXEL_MEANS1
if FLAGS.channels == 1:
PIXEL_MEANS = PIXEL_MEANS1
logging.basicConfig(filename='train-%s-%s.log' % (FLAGS.backbone, datetime.datetime.now().strftime('%Y%m%d-%H%M%S')),level=logging.DEBUG, format='%(asctime)s %(message)s')
if FLAGS.model:
try:
os.makedirs(FLAGS.model)
except:
pass
if FLAGS.finetune or FLAGS.vgg:
print_red("finetune, using RGB with vgg pixel means")
COLORSPACE = 'RGB'
PIXEL_MEANS = VGG_PIXEL_MEANS
X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
# ground truth labels
Y = tf.placeholder(tf.int32, shape=(None, None, None, 1), name="labels")
is_training = tf.placeholder(tf.bool, name="is_training")
if not FLAGS.finetune:
patch_arg_scopes()
#with \
# slim.arg_scope([slim.batch_norm], decay=0.9, epsilon=5e-4):
network_fn = nets_factory.get_network_fn(FLAGS.backbone, num_classes=None,
weight_decay=FLAGS.weight_decay, is_training=is_training)
ft, _ = network_fn(X-PIXEL_MEANS, global_pool=False, output_stride=16)
FLAGS.stride = 16
with tf.variable_scope('head'):
logits = slim.conv2d_transpose(ft, FLAGS.classes, 32, 16)
logits = tf.identity(logits, name='logits')
# probability of class 1 -- not very useful if FLAGS.classes > 2
probs = tf.squeeze(tf.slice(tf.nn.softmax(logits
iABC2XYZ/abc | DM_RFGAP_3/PartGen.py | Python | gpl-3.0 | 516 | 0.05814
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 17:52:19 2017
Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
______________________________________________________
"""
from numpy.random import multivariate_normal as npmvn
from numpy import diag
def PartGen(emitT,numPart):
meanPart=[0.,0.,0.,0.,0.,0.]
covPart=diag([emitT[0],emitT[0],emitT[1],emitT[1],emitT[2],emitT[2]])
x,xp,y,yp,z,zp=npmvn(meanPart,covPart,numPart).T
return x,xp,y,yp,z,zp
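# --- Illustrative usage sketch (not part of the original file) ---
# Draws 1000 particles from uncorrelated Gaussians; the emitT values are made-up
# numbers standing in for the per-plane variances the function expects.
if __name__ == "__main__":
    x, xp, y, yp, z, zp = PartGen([1.0e-6, 2.0e-6, 3.0e-6], 1000)
    print(x.shape, xp.std(), zp.mean())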
twhyte/openparliament | parliament/imports/hans_old/current.py | Python | agpl-3.0 | 11,722 | 0.006398
"""This *was* the parser for the current HTML format on parl.gc.ca.
But now we have XML. See parl_document.py.
This module is organized like so:
__init__.py - utility functions, simple parse interface
common.py - infrastructure used in the parsers, i.e. regexes
current.py - parser for the Hansard format used from 2006 to the present
old.py - (fairly crufty) parser for the format used from 1994 to 2006
"""
from parliament.imports.hans_old.common import *
import logging
logger = logging.getLogger(__name__)
class HansardParser2009(HansardParser):
def __init__(self, hansard, html):
for regex in STARTUP_RE_2009:
html = re.sub(regex[0], regex[1], html)
super(HansardParser2009, self).__init__(hansard, html)
for x in self.soup.findAll('a', 'deleteMe'):
x.findParent('div').extract()
def process_related_link(self, tag, string, current_politician=None):
#print "PROCESSING RELATED for %s" % string
resid = re.search(r'ResourceID=(\d+)', tag['href'])
restype = re.search(r'ResourceType=(Document|Affiliation)', tag['href'])
if not (resid and restype):
return string
resid, restype = int(resid.group(1)), restype.group(1)
if restype == 'Document':
try:
bill = Bill.objects.get_by_legisinfo_id(resid)
except Bill.DoesNotExist:
match = re.search(r'\b[CS]\-\d+[A-E]?\b', string)
if not match:
logger.error("Invalid bill link %s" % string)
return string
bill = Bill.objects.create_temporary_bill(legisinfo_id=resid,
number=match.group(0), session=self.hansard.session)
except Exception, e:
print "Related bill search failed for callback %s" % resid
print repr(e)
return string
return u'<bill id="%d" name="%s">%s</bill>' % (bill.id, escape(bill.name), string)
elif restype == 'Affiliation':
try:
pol = Politician.objects.getByParlID(resid)
except Politician.DoesNotExist:
print "Related politician search failed for callback %s" % resid
if getattr(settings, 'PARLIAMENT_LABEL_FAILED_CALLBACK', False):
# FIXME migrate away from internalxref?
InternalXref.objects.get_or_create(schema='pol_parlid', int_value=resid, target_id=-1)
return string
if pol == current_politician:
return string # When someone mentions her riding, don't link back to her
return u'<pol id="%d" name="%s">%s</pol>' % (pol.id, escape(pol.name), string)
def get_text(self, cursor):
text = u''
for string in cursor.findAll(text=parsetools.r_hasText):
if string.parent.name == 'a' and string.parent['class'] == 'WebOption':
text += self.process_related_link(string.parent, string, self.t['politician'])
else:
text += unicode(string)
return text
def parse(self):
super(HansardParser2009, self).parse()
# Initialize variables
t = ParseTracker()
self.t = t
member_refs = {}
# Get the date
c = self.soup.find(text='OFFICIAL REPORT (HANSARD)').findNext('h2')
self.date = datetime.datetime.strptime(c.string.strip(), "%A, %B %d, %Y").date()
self.hansard.date = self.date
self.hansard.save()
c = c.findNext(text=r_housemet)
match = re.search(r_housemet, c.string)
t['timestamp'] = self.houseTime(match.group(1), match.group(2))
t.setNext('timestamp', t['timestamp'])
# Move the pointer to the start
c = c.next
# And start the big loop
while c is not None:
# It's a string
if not hasattr(c, 'name'):
pass
# Heading
elif c.name == 'h2':
c = c.next
if not parsetools.isString(c): raise ParseException("Expecting string right after h2")
t.setNext('heading', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip())))
# Topic
elif c.name == 'h3':
top = c.find(text=r_letter)
#if not parsetools.isString(c):
# check if it's an empty header
# if c.parent.find(text=r_letter):
# raise ParseException("Expecting string right after h3")
if top is not None:
c = top
t['topic_set'] = True
t.setNext('topic', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip())))
elif c.name == 'h4':
if c.string == 'APPENDIX':
self.saveStatement(t)
print "Appendix reached -- we're done!"
break
# Timestamp
elif c.name == 'a' and c.has_key('name') and c['name'].startswith('T'):
match = re.search(r'^T(\d\d)(\d\d)$', c['name'])
if match:
t.setNext('timestamp', parsetools.time_to_datetime(
hour=int(match.group(1)),
minute=int(match.group(2)),
date=self.date))
else:
raise ParseException("Couldn't match time %s" % c.attrs['name'])
elif c.name == 'b' and c.string:
# Something to do with written answers
match = r_honorific.search(c.string)
if match:
# It's a politician asking or answering a question
# We don't get a proper link here, so this has to be a name match
polname = re.sub(r'\(.+\)', '', match.group(2)).strip()
self.saveStatement(t)
t['member_title'] = c.string.strip()
t['written_question'] = True
try:
pol = Politician.objects.get_by_name(polname, session=self.hansard.session)
t['politician'] = pol
t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date)
except Politician.DoesNotExist:
print "WARNING: No name match for %s" % polname
except Politician.MultipleObjectsReturned:
print "WARNING: Multiple pols for %s" % polname
else:
if not c.string.startswith('Question'):
print "WARNING: Unexplained boldness: %s" % c.string
# div -- the biggie
elif c.name == 'div':
origdiv = c
if c.find('b'):
# We think it's a new speaker
# Save the current buffer
self.saveStatement(t)
c = c.find('b')
if c.find('a'):
# There's a link...
c = c.find('a')
match = re.search(r'ResourceType=Affiliation&ResourceID=(\d+)', c['href'])
if match and c.find(text=r_letter):
parlwebid = int(match.group(1))
# We have the parl ID. First, see if we already know this ID.
pol = Politician.objects.getByParlID(parlwebid, lookOnline=False)
if pol is None:
# We don't. Try to do a quick name match first (if flags say so)
if not GET_PARLID_ONLINE:
who = c.next.string
match = re.search(r_honorific, who)
if match:
kubeflow/pipelines | components/gcp/container/component_sdk/python/tests/google/bigquery/test__query.py | Python | apache-2.0 | 5,793 | 0.005351
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from google.cloud import bigquery
from google.cloud.bigquery.job import ExtractJobConfig, DestinationFormat
from google.api_core import exceptions
from kfp_component.google.bigquery import query
CREATE_JOB_MODULE = 'kfp_component.google.bigquery._query'
@mock.patch(CREATE_JOB_MODULE + '.display.display')
@mock.patch(CREATE_JOB_MODULE + '.gcp_common.dump_file')
@mock.patch(CREATE_JOB_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_JOB_MODULE + '.bigquery.Client')
class TestQuery(unittest.TestCase):
def test_query_succeed(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.side_effect = exceptions.NotFound('not found')
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1',
output_gcs_path='gs://output/path')
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_called()
expected_job_config = bigquery.QueryJobConfig()
expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
expected_job_config.destination = mock_dataset.table('query_ctx1')
mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY,
job_id = 'query_ctx1')
actual_job_config = mock_client().query.call_args_list[0][0][1]
self.assertDictEqual(
expected_job_config.to_api_repr(),
actual_job_config.to_api_repr()
)
extract = mock_client().extract_table.call_args_list[0]
self.assertEqual(extract[0], (mock_dataset.table('query_ctx1'), 'gs://output/path',))
self.assertEqual(extract[1]["job_config"].destination_format, "CSV",)
def test_query_no_output_path(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.return_value = bigquery.Dataset(mock_dataset)
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1', 'table-1')
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_not_called()
mock_client().extract_table.assert_not_called()
expected_job_config = bigquery.QueryJobConfig()
expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
expected_job_config.destination = mock_dataset.table('table-1')
mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY,
job_id = 'query_ctx1')
actual_job_config = mock_client().query.call_args_list[0][0][1]
self.assertDictEqual(
expected_job_config.to_api_repr(),
actual_job_config.to_api_repr()
)
def test_query_output_json_format(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.side_effect = exceptions.NotFound('not found')
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1',
output_gcs_path='gs://output/path',
output_destination_format="NEWLINE_DELIMITED_JSON")
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_called()
extract = mock_client().extract_table.call_args_list[0]
self.assertEqual(extract[0], (mock_dataset.table('query_ctx1'), 'gs://output/path',))
self.assertEqual(extract[1]["job_config"].destination_format, "NEWLINE_DELIMITED_JSON",)
wulczer/ansible | lib/ansible/inventory/ini.py | Python | gpl-3.0 | 7,628 | 0.003146
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible import errors
from ansible import utils
import shlex
import re
import ast
class InventoryParser(object):
"""
Host inventory for ansible.
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
with open(filename) as fh:
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
self._parse()
def _parse(self):
self._parse_base_groups()
self._parse_group_children()
self._add_allgroup_children()
self._parse_group_variables()
return self.groups
@staticmethod
def _parse_value(v):
if "#" not in v:
try:
return ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return v
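# Illustrative behaviour of _parse_value (comment added for clarity, not in the original):
# "123" -> 123, "True" -> True, "[1, 2]" -> [1, 2]; any value containing '#', or one
# that literal_eval rejects, falls through and is returned unchanged as a string.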
# [webservers]
# alpha
# beta:2345
# gamma sudo=True user=root
# delta asdf=jkl favcolor=red
def _add_allgroup_children(self):
for group in self.groups.values():
if group.depth == 0 and group.name != 'all':
self.groups['all'].add_child_group(group)
def _parse_base_groups(self):
# FIXME: refactor
ungrouped = Group(name='ungrouped')
all = Group(name='all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
for line in self.lines:
line = utils.before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
active_group_name = active_group_name.rsplit(":", 1)[0]
if active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
active_group_name = None
elif active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
elif line.startswith(";") or line == '':
pass
elif active_group_name:
tokens = shlex.split(line)
if len(tokens) == 0:
continue
hostname = tokens[0]
port = C.DEFAULT_REMOTE_PORT
# Three cases to check:
# 0. A hostname that contains a range pseudo-code and a port
# 1. A hostname that contains just a port
if hostname.count(":") > 1:
# Possibly an IPv6 address, or maybe a host line with multiple ranges
# IPv6 with Port XXX:XXX::XXX.port
# FQDN foo.example.com
if hostname.count(".") == 1:
(hostname, port) = hostname.rsplit(".", 1)
elif ("[" in hostname and
"]" in hostname and
":" in hostname and
(hostname.rindex("]") < hostname.rindex(":")) or
("]" not in hostname and ":" in hostname)):
(hostname, port) = hostname.rsplit(":", 1)
hostnames = []
if detect_range(hostname):
hostnames = expand_hostname_range(hostname)
else:
hostnames = [hostname]
for hn in hostnames:
host = None
if hn in self.hosts:
host = self.hosts[hn]
else:
host = Host(name=hn, port=port)
self.hosts[hn] = host
if len(tokens) > 1:
for t in tokens[1:]:
if t.startswith('#'):
break
try:
(k,v) = t.split("=", 1)
except ValueError, e:
raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
# atlanta
# raleigh
def _parse_group_children(self):
group = None
for line in self.lines:
line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
line = line.replace("[","").replace(":children]","")
group = self.groups.get(line, None)
if group is None:
group = self.groups[line] = Group(name=line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
raise errors.AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
# [webservers:vars]
# http_port=1234
# maxRequestsPerChild=200
def _parse_group_variables(self):
group = None
for line in self.lines:
line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
raise errors.AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif line == '':
pass
elif group:
if "=" not in line:
raise errors.AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
phantomii/restalchemy | restalchemy/tests/functional/restapi/sa_based/microservice/db.py | Python | apache-2.0 | 1,082 | 0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import sqlalchemy as sa
from sqlalchemy import orm
_engine = None
_session_maker = None
DB_CONNECTION = "sqlite:////tmp/restalchemy-%s.db" % uuid.uuid4()
def get_engine():
global _engine
if _engine is None:
_engine = sa.create_engine(DB_CONNECTION, echo=True)
return _engine
def get_session():
return orm.sessionmaker(bind=get_engine())
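# --- Illustrative usage sketch (not part of the original module) ---
# get_session() as written returns a sessionmaker factory, so it is called a second
# time here to obtain an actual session bound to the throwaway sqlite database.
def _example_ping():
    session = get_session()()
    try:
        return session.execute(sa.text("SELECT 1")).scalar()
    finally:
        session.close()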
udayinfy/openerp-7.0 | gap_analysis_project/gap_analysis_project.py | Python | agpl-3.0 | 13,460 | 0.008915
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <yannick.gouin@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
import time
from osv import fields, osv
from tools.translate import _
import tools
from tools import ustr
class gap_analysis(osv.Model):
_inherit = "gap_analysis"
_name = "gap_analysis"
def generate_project(self, cr, uid, ids, context=None):
project_pool = self.pool.get('project.project')
task_pool = self.pool.get('project.task')
for gap in self.browse(cr, uid, ids, context=context):
partner_id = gap.partner_id and gap.partner_id.id or False
notes = gap.note or ''
project_vals = {
'name': gap.name,
'description': notes,
'user_id': gap.user_id.id,
'partner_id': partner_id,
'gap_analysis_id': gap.id,
}
project_id = project_pool.create(cr, uid, project_vals, context=context)
for gap_line in gap.gap_lines:
if gap_line.to_project and gap_line.keep:
time4dev = 0
time4tech = 0
time4fct = 0
time4test = gap_line.testing or 0
if gap_line.effort:
if gap_line.effort.unknown:
time4dev = gap_line.duration_wk
else:
time4dev = gap_line.effort.duration
for workload in gap_line.workloads:
if workload.type.category == "Technical Analysis":
time4tech += workload.duration
else:
time4fct += workload.duration
# Create Tasks
if time4dev > 0 or time4tech > 0 or time4fct > 0 or time4test > 0:
maintask_vals = {
'name': gap_line.functionality.name[0:100],
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_functional and gap.user_functional.id or False,
'gap_line_id': gap_line.id,
'to_report': True,
'org_planned_hours': 0,
'planned_hours': 0,
'remaining_hours': 0,
}
maintask_id = task_pool.create(cr, uid, maintask_vals, context=context)
maintask_id = [int(maintask_id)]
if time4test > 0:
task_vals4test = {
'name': gap_line.functionality.name[0:100] + " [TEST]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4test,
'planned_hours': time4test,
'remaining_hours': time4test,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_test and gap.user_test.id or False,
'gap_line_id': gap_line.id,
}
task_pool.create(cr, uid, task_vals4test, context=context)
if time4dev > 0:
task_vals4dev = {
'name': gap_line.functionality.name[0:100] + " [DEV]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4dev,
'planned_hours': time4dev,
'remaining_hours': time4dev,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_dev and gap.user_dev.id or False,
'gap_line_id': gap_line.id,
}
task_pool.create(cr, uid, task_vals4dev, context=context)
if time4tech > 0:
task_vals4tech = {
'name': gap_line.functionality.name[0:100] + " [TECH]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4tech,
'planned_hours': time4tech,
'remaining_hours': time4tech,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.category and gap_line.category.id or False,
'user_id': gap.user_technical and gap.user_technical.id or False,
'gap_line_id': gap_line.id,
}
task_pool.create(cr, uid, task_vals4tech, context=context)
if time4fct > 0:
task_vals4fct = {
'name': gap_line.functionality.name[0:100] + " [FUNC]",
'code_gap': gap_line.code or "",
'project_id': project_id,
'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
'partner_id': partner_id,
'org_planned_hours': time4fct,
'planned_hours': time4fct,
'remaining_hours': time4fct,
'parent_ids': [(6,0,maintask_id)],
'gap_category_id': gap_line.functionality.category and gap_line.functionality.category.id or False,
'user_id': gap.user_functional and gap.user_functional.id or False,
'gap_line_id': gap_line.i
joshisa/mistub | mistub/models/concepts.py | Python | apache-2.0 | 833 | 0
#!/usr/bin/env python
"""Contains the Data Model for a cool Resource.
"""
__author__ = "Sanjay Joshi"
__copyright__ = "IBM Copyright 2017"
__credits__ = ["Sanjay Joshi"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Sanjay Joshi"
__email__ = "joshisa@us.ibm.com"
__status__ = "Prototype"
schema = {
'url': 'corpora/ada_diabetes/concepts',
'schema': {
'cloudhost': {
'type': 'string',
|
'default': 'Powered by IBM
|
Bluemix and Python Eve'
},
'base16': {
'type': 'string',
'default': '######'
},
'hex': {
'type': 'string',
'default': '##-##-##'
},
'organization': {
'type': 'string',
'default': 'Doh!MissingOrg'
}
},
'allow_unknown': True
}
buzmakov/tomography_scripts | tomo/yaivan/dispersion/alg.py | Python | mit | 1,930 | 0.031088
import astra
def gpu_fp(pg, vg, v):
v_id = astra.data2d.create('-vol', vg, v)
rt_id = astra.data2d.create('-sino', pg)
fp_cfg = astra.astra_dict('FP_CUDA')
fp_cfg['VolumeDataId'] = v_id
fp_cfg['ProjectionDataId'] = rt_id
fp_id = astra.algorithm.create(fp_cfg)
astra.algorithm.run(fp_id)
out = astra.data2d.get(rt_id)
astra.algorithm.delete(fp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_bp(pg, vg, rt, supersampling=1):
v_id = astra.data2d.create('-vol', vg)
rt_id = astra.data2d.create('-sino', pg, data=rt)
bp_cfg = astra.astra_dict('BP_CUDA')
bp_cfg['ReconstructionDataId'] = v_id
bp_cfg['ProjectionDataId'] = rt_id
bp_id = astra.algorithm.create(bp_cfg)
astra.algorithm.run(bp_id)
out = astra.data2d.get(v_id)
astra.algorithm.delete(bp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_fbp(pg, vg, rt):
rt_id = astra.data2d.create('-sino', pg, data=rt)
v_id = astra.data2d.create('-vol', vg)
fbp_cfg = astra.astra_dict('FBP_CUDA')
fbp_cfg['ReconstructionDataId'] = v_id
fbp_cfg['ProjectionDataId'] = rt_id
#fbp_cfg['FilterType'] = 'none'
fbp_id = astra.algorithm.create(fbp_cfg)
astra.algorithm.run(fbp_id, 100)
out = astra.data2d.get(v_id)
astra.algorithm.delete(fbp_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
def gpu_sirt(pg, vg, rt, n_iters=100):
rt_id = astra.data2d.create('-sino', pg, data=rt)
v_id = astra.data2d.create('-vol', vg)
sirt_cfg = astra.astra_dict('SIRT_CUDA')
sirt_cfg['ReconstructionDataId'] = v_id
sirt_cfg['ProjectionDataId'] = rt_id
#sirt_cfg['option'] = {}
#sirt_cfg['option']['MinConstraint'] = 0
sirt_id = astra.algorithm.create(sirt_cfg)
astra.algorithm.run(sirt_id, n_iters)
out = astra.data2d.get(v_id)
astra.algorithm.delete(sirt_id)
astra.data2d.delete(rt_id)
astra.data2d.delete(v_id)
return out
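# --- Illustrative usage sketch (not part of the original file) ---
# Forward-projects a simple square phantom and reconstructs it with FBP; the 128x128
# size and 180 parallel-beam angles are arbitrary, and a CUDA-capable GPU is assumed.
def _example_roundtrip():
    import numpy as np
    vg = astra.create_vol_geom(128, 128)
    pg = astra.create_proj_geom('parallel', 1.0, 192, np.linspace(0, np.pi, 180, False))
    phantom = np.zeros((128, 128), dtype=np.float32)
    phantom[32:96, 32:96] = 1.0
    sino = gpu_fp(pg, vg, phantom)
    return gpu_fbp(pg, vg, sino)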
quantumlib/Cirq | dev_tools/import_test.py | Python | apache-2.0 | 8,158 | 0.000981
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locates imports that violate cirq's submodule dependencies.
Specifically, this test treats the modules as a tree structure where `cirq` is
the root, each submodule is a node and each python file is a leaf node. While
a node (module) is in the process of being imported, it is not allowed to import
nodes for the first time other than its children. If a module was imported
earlier by `cirq.__init__`, it may be imported. This is currently only enforced
for the first level of submodules under cirq, not sub-submodules.
Usage:
dev_tools/import_test.py [-h] [--time] [--others]
optional arguments:
-h, --help show this help message and exit
--time print a report of the modules that took the longest to import
--others also track packages other than cirq and print when they are
imported
"""
from typing import List
import argparse
import collections
import os.path
import subprocess
import sys
import time
parser = argparse.ArgumentParser(
description="Locates imports that violate cirq's submodule dependencies."
)
parser.add_argument(
'--time',
action='store_true',
help='print a report of the modules that took the longest to import',
)
parser.add_argument(
'--others',
action='store_true',
help='also track packages other than cirq and print when they are imported',
)
def verify_import_tree(depth: int = 1, track_others: bool = False, timeit: bool = False) -> bool:
"""Locates imports that violate cirq's submodule dependencies by
instrumenting python import machinery then importing cirq.
Logs when each submodule (up to the given depth) begins and ends executing
during import and prints an error when any import within a submodule causes
a neighboring module to be imported for the first time. The indent
pattern of the printed output will match the module tree structure if the
imports are all valid. Otherwise an error is printed indicating the
location of the invalid import.
Output for valid imports:
Start cirq
...
Start cirq.study
End cirq.study
Start cirq.circuits
End cirq.circuits
Start cirq.schedules
End cirq.schedules
...
End cirq
Output for an invalid import in `cirq/circuits/circuit.py`:
Start cirq
...
Start cirq.study
End cirq.study
Start cirq.circuits
ERROR: cirq.circuits.circuit imported cirq.vis
Start cirq.vis
End cirq.vis
... # Possibly more errors caused by the first.
End cirq.circuits
Start cirq.schedules
End cirq.schedules
...
End cirq
Invalid import: cirq.circuits.circuit imported cirq.vis
Args:
depth: How deep in the module tree to verify. If depth is 1, verifies
that submodules of cirq like cirq.ops doesn't import cirq.circuit.
If depth is 2, verifies that submodules and sub-submodules like
cirq.ops.raw_types doesn't import cirq.ops.common_gates or
cirq.circuit.
track_others: If True, logs where cirq first imports an external package
in addition to logging when cirq modules are imported.
timeit: Measure the import time of cirq and each submodule and print a
report of the worst. Includes times for external packages used by
cirq if `track_others` is True.
Returns:
True is no import issues, False otherwise.
"""
fail_list = []
start_times = {}
load_times = {}
current_path: List[str] = []
currently_running_paths: List[List[str]] = [[]]
import_depth = 0
indent = ' ' * 2
def wrap_module(module):
nonlocal import_depth
start_times[module.__name__] = time.perf_counter()
path = module.__name__.split('.')
if path[0] != 'cirq':
if len(path) == 1:
print(f'{indent * import_depth}Other {module.__name__}')
return module
currently_running_paths.append(path)
if len(path) == len(current_path) + 1 and path[:-1] == current_path:
# Move down in tree
current_path.append(path[-1])
else:
# Jump somewhere else in the tree
handle_error(currently_running_paths[-2], path)
current_path[:] = path
if len(path) <= depth + 1:
print(f'{indent * import_depth}Start {module.__name__}')
import_depth += 1
return module
def after_exec(module):
nonlocal import_depth
load_times[module.__name__] = time.perf_counter() - start_times[module.__name__]
path = module.__name__.split('.')
if path[0] != 'cirq':
return
assert path == currently_running_paths.pop(), 'Unexpected import state'
if len(path) <= depth + 1:
import_depth -= 1
print(f'{indent * import_depth}End {module.__name__}')
if path == current_path:
# No submodules were here
current_path.pop()
elif len(path) == len(current_path) - 1 and path == current_path[:-1]:
# Move up in tree
current_path.pop()
else:
# Jump somewhere else in the tree
current_path[:] = path[:-1]
def handle_error(import_from, import_to):
if import_from[: depth + 1] != import_to[: depth + 1]:
msg = f"{'.'.join(import_from)} imported {'.'.join(import_to)}"
fail_list.append(msg)
print(f'ERROR: {msg}')
# Import wrap_module_executions without importing cirq
orig_path = list(sys.path)
project_dir = os.path.dirname(os.path.dirname(__file__))
cirq_dir = os.path.join(project_dir, 'cirq')
sys.path.append(cirq_dir) # Put cirq/_import.py in the path.
from cirq._import import wrap_module_executions  # type: ignore
sys.path[:] = orig_path # Restore the path.
sys.path.append(project_dir) # Ensure the cirq package is in the path.
# note that with the cirq.google injection we do change the metapath
with wrap_module_executions('' if track_others else 'cirq', wrap_module, after_exec, False):
# Import cirq with instrumentation
import cirq  # pylint: disable=unused-import
sys.path[:] = orig_path # Restore the path.
if fail_list:
print()
# Only print the first because later errors are often caused by the
# first and not as helpful.
print(f'Invalid import: {fail_list[0]}')
if timeit:
worst_loads = collections.Counter(load_times).most_common(15)
print()
print('Worst load times:')
for name, dt in worst_loads:
print(f'{dt:.3f} {name}')
return not fail_list
FAIL_EXIT_CODE = 65
def test_no_circular_imports():
"""Runs the test in a subprocess because cirq has already been imported
before in an earlier test but this test needs to control the import process.
"""
status = subprocess.call([sys.executable, __file__])
if status == FAIL_EXIT_CODE:
# coverage: ignore
raise Exception('Invalid import. See captured output for details.')
elif status != 0:
# coverage: ignore
raise RuntimeError('Error in subprocess')
if __name__ == '__main__':
args = parser.parse_args()
success = verify_import_tree(track_others=args.others, timeit=args.time)
sys.exit(0 if success else FAIL_EXIT_CODE)
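# As a generic illustration only (this is NOT cirq's implementation of
# wrap_module_executions, and every name below is hypothetical), a
# sys.meta_path hook that times module execution could be sketched like this:
#
#     import importlib.abc, sys, time
#
#     class _TimingLoader(importlib.abc.Loader):
#         def __init__(self, wrapped, times):
#             self._wrapped, self._times = wrapped, times
#         def create_module(self, spec):
#             return self._wrapped.create_module(spec)
#         def exec_module(self, module):
#             start = time.perf_counter()
#             try:
#                 self._wrapped.exec_module(module)
#             finally:
#                 self._times[module.__name__] = time.perf_counter() - start
#
#     class _TimingFinder(importlib.abc.MetaPathFinder):
#         def __init__(self, times):
#             self._times = times
#         def find_spec(self, fullname, path, target=None):
#             # Ask the finders behind us for the real spec, then wrap its loader.
#             for finder in sys.meta_path:
#                 if finder is self or not hasattr(finder, 'find_spec'):
#                     continue
#                 spec = finder.find_spec(fullname, path, target)
#                 if spec is not None and spec.loader is not None:
#                     spec.loader = _TimingLoader(spec.loader, self._times)
#                     return spec
#             return None
#
#     load_times = {}
#     sys.meta_path.insert(0, _TimingFinder(load_times))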
|
deklungel/iRulez
|
src/webservice/_inputPin.py
|
Python
|
mit
| 1,909
| 0.001572
|
from flask import jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Table, Column, Integer, ForeignKey
from src.webservice.base import Base
from src.webservice._action import Action
db = SQLAlchemy()
Base.query = db.session.query_property()
class Input(Base):
__tablename__ = 'tbl_InputPin'
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer())
name = db.Column(db.String(50))
parent_id = db.Column(db.Integer, db.ForeignKey('tbl_Arduino.id'))
parent = db.relationship('Device')
time_between_clicks = db.Column(db.Float(50))
actions = db.relationship("Action", secondary="tbl_InputPin_Action")
@staticmethod
def get_all_inputs():
inputs = Input.query.outerjoin(Input.actions).all()
output = []
for input in inputs:
actions = []
actions_id = []
for action in input.actions:
actions.append(action.name)
actions_id.append(action.id)
input_data = {'id': input.id, 'name': input.name, 'device_name': input.parent.name, 'actions_id': actions_id,
'number': input.number, 'time_between_clicks': input.time_between_clicks, 'actions': actions}
output.append(input_data)
db.session.commit()
return jsonify({'response': output})
@staticmethod
def update_input(request):
data = request.get_json()
input = db.session.query(Input).filter_by(id=data['id']).first()
if 'name' in data:
input.name = data['name']
if 'time_between_clicks' in data:
input.time_between_clicks = data['time_between_clicks']
if 'actions_id' in data:
actions = Action.get_actions(data['actions_id'])
input.actions = actions
db.session.commit()
return jsonify({'result': 'User has been changed'})
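# Usage sketch (not part of the original file; assumes a Flask app wired to the
# same SQLAlchemy instance, `from flask import request`, and hypothetical routes):
#
#     @app.route('/api/inputs', methods=['GET'])
#     def list_inputs():
#         return Input.get_all_inputs()
#
#     @app.route('/api/inputs', methods=['PUT'])
#     def change_input():
#         return Input.update_input(request)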
|
seenaburns/Chroma
|
setup.py
|
Python
|
bsd-3-clause
| 768
| 0.001302
|
from distutils.core import setup
setup(
name='Chroma',
version='0.2.0',
author='Seena Burns',
author_email='hello@seenaburns.com',
url='https://github.com/seenaburns/Chroma',
license=open('LICENSE.txt').read(),
description='Color handling made simple.',
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
packages=['chroma'],
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
),
)
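# Build/install sketch (standard distutils commands, not taken from the repository docs):
#     python setup.py sdist      # build a source distribution
#     python setup.py install    # install the chroma package locally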
|
ygenc/onlineLDA
|
onlineldavb_new/build/scipy/scipy/odr/odrpack.py
|
Python
|
gpl-3.0
| 39,749
| 0.001157
|
"""
Python wrappers for Orthogonal Distance Regression (ODRPACK).
Classes
=======
Data -- stores the data and weights to fit against
RealData -- stores data with standard deviations and covariance matrices
Model -- stores the model and its related information
Output -- stores all of the output from an ODR run
ODR -- collects all data and runs the fitting routine
Exceptions
==========
odr_error -- error sometimes raised inside odr() and can be raised in the
fitting functions to tell ODRPACK to halt the procedure
odr_stop -- error to raise in fitting functions to tell ODRPACK that the data or
parameters given are invalid
Use
===
Basic use:
1) Define the function you want to fit against.
::
def f(B, x):
''' Linear function y = m*x + b '''
return B[0]*x + B[1]
# B is a vector of the parameters.
# x is an array of the current x values.
# x is same format as the x passed to Data or RealData.
# Return an array in the same format as y passed to Data or RealData.
2) Create a Model.
::
linear = Model(f)
3) Create a Data or RealData instance.
::
mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
or
::
mydata = RealData(x, y, sx=sx, sy=sy)
4) Instantiate ODR with your data, model and initial parameter estimate.
::
myodr = ODR(mydata, linear, beta0=[1., 2.])
5) Run the fit.
::
myoutput = myodr.run()
6) Examine output.
::
myoutput.pprint()
Read the docstrings and the accompanying tests for more advanced usage.
Notes
=====
* Array formats -- FORTRAN stores its arrays in memory column first, i.e. an
array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
efficiency and convenience, the input and output arrays of the fitting
function (and its Jacobians) are passed to FORTRAN without transposition.
Therefore, where the ODRPACK documentation says that the X array is of shape
(N, M), it will be passed to the Python function as an array of shape (M, N).
If M==1, the one-dimensional case, then nothing matters; if M>1, then your
Python functions will be dealing with arrays that are indexed in reverse of
the ODRPACK documentation. No real biggie, but watch out for your indexing of
the Jacobians: the i,j'th elements (@f_i/@x_j) evaluated at the n'th
observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
you can always use the transpose() function from scipy explicitly.
* Examples -- See the accompanying file test/test.py for examples of how to set
up fits of your own. Some are taken from the User's Guide; some are from
other sources.
* Models -- Some common models are instantiated in the accompanying module
models.py . Contributions are welcome.
Credits
=======
* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
Robert Kern
robert.kern@gmail.com
"""
import numpy
from scipy.odr import __odrpack
__all__ = ['odr', 'odr_error', 'odr_stop', 'Data', 'RealData', 'Model',
'Output', 'ODR']
odr = __odrpack.odr
odr_error = __odrpack.odr_error
odr_stop = __odrpack.odr_stop
def _conv(obj, dtype=None):
""" Convert an object to the preferred form for input to the odr routine.
"""
if obj is None:
return obj
else:
if dtype is None:
obj = numpy.asarray(obj)
else:
obj = numpy.asarray(obj, dtype)
if obj.shape == ():
# Scalar.
return obj.dtype.type(obj)
else:
return obj
def _report_error(info):
""" Interprets the return code of the odr routine.
Parameters
----------
info : int
The return code of the odr routine.
Returns
-------
problems : list(str)
A list of messages about why the odr() routine stopped.
"""
stopreason = ('Blank',
'Sum of squares convergence',
'Parameter convergence',
'Both sum of squares and parameter convergence',
'Iteration limit reached')[info % 5]
if info >= 5:
# questionable results or fatal error
I = (info/10000 % 10,
info/1000 % 10,
info/100 % 10,
info/10 % 10,
info % 10)
problems = []
if I[0] == 0:
if I[1] != 0:
problems.append('Derivatives possibly not correct')
if I[2] != 0:
problems.append('Error occurred in callback')
if I[3] != 0:
problems.append('Problem is not full rank at solution')
problems.append(stopreason)
elif I[0] == 1:
if I[1] != 0:
problems.append('N < 1')
if I[2] != 0:
problems.append('M < 1')
if I[3] != 0:
problems.append('NP < 1 or NP > N')
if I[4] != 0:
problems.append('NQ < 1')
elif I[0] == 2:
if I[1] != 0:
problems.append('LDY and/or LDX incorrect')
if I[2] != 0:
problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
if I[3] != 0:
problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
if I[4] != 0:
problems.append('LWORK and/or LIWORK too small')
elif I[0] == 3:
if I[1] != 0:
problems.append('STPB and/or STPD incorrect')
if I[2] != 0:
problems.append('SCLB and/or SCLD incorrect')
if I[3] != 0:
problems.append('WE incorrect')
if I[4] != 0:
problems.append('WD incorrect')
elif I[0] == 4:
problems.append('Error in derivatives')
elif I[0] == 5:
problems.append('Error occurred in callback')
elif I[0] == 6:
problems.append('Numerical error detected')
return problems
else:
return [stopreason]
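# Example of the decoding above (illustrative, not from the original file):
# an info value below 5 is not an error, so only the stop reason is returned.
#     _report_error(1)   # -> ['Sum of squares convergence']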
class Data(object):
"""
scipy.odr.Data(x, y=None, we=None, wd=None, fix=None, meta={})
The Data class stores the data to fit.
Parameters
----------
x : array_like
Input data for regression.
y : array_like, optional
Input data for regression.
we : array_like, optional
If `we` is a scalar, then that value is used for all data points (and
all dimensions of the response variable).
If `we` is a rank-1 array of length q (the dimensionality of the
response variable), then this vector is the diagonal of the covariant
weighting matrix for all data points.
If `we` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th response variable
observation (single-dimensional only).
If `we` is a rank-2 array of shape (q, q), then this is the full
covariant weighting matrix broadcast to each observation.
If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
If the fit is implicit, then only a positive scalar value is used.
wd : array_like, optional
If `wd` is a scalar, then that value is used for all data points
(and all dimensions of the input variable). If `wd` = 0, then the
covariant weighting matrix for each observation is set to the identity
matrix (so each dimension of each observation has the same weight).
If `wd` is a rank-1 array of length m (the dimensionality of the input
variable), then this vector is the diagonal of the covariant weighting
matrix for all data points.
If `wd` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th input varia
|
CLVsol/odoo_addons
|
clv_medicament_template/wkf/__init__.py
|
Python
|
agpl-3.0
| 1,439
| 0.011814
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or           #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_medicament_template_wkf
|
sidhart/antlr4
|
runtime/Python2/src/antlr4/dfa/DFASerializer.py
|
Python
|
bsd-3-clause
| 3,848
| 0.002339
|
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
# A DFA walker that knows how to dump them to serialized strings.#/
from io import StringIO
from antlr4.Utils import str_list
class DFASerializer(object):
def __init__(self, dfa, literalNames=None, symbolicNames=None):
self.dfa = dfa
self.literalNames = literalNames
self.symbolicNames = symbolicNames
def __str__(self):
return unicode(self)
def __unicode__(self):
if self.dfa.s0 is None:
return None
with StringIO() as buf:
for s in self.dfa.sortedStates():
n = 0
if s.edges is not None:
n = len(s.edges)
for i in range(0, n):
t = s.edges[i]
if t is not None and t.stateNumber != 0x7FFFFFFF:
buf.write(self.getStateString(s))
label = self.getEdgeLabel(i)
buf.write(u"-")
buf.write(label)
buf.write(u"->")
buf.write(self.getStateString(t))
buf.write(u'\n')
output = buf.getvalue()
if len(output)==0:
return None
else:
return output
def getEdgeLabel(self, i):
if i==0:
return u"EOF"
if self.literalNames is not None and i<=len(self.literalNames):
return self.literalNames[i-1]
elif self.symbolicNames is not None and i<=len(self.symbolicNames):
return self.symbolicNames[i-1]
else:
return unicode(i-1)
def getStateString(self, s):
n = s.stateNumber
baseStateStr = ( u":" if s.isAcceptState else u"") + u"s" + unicode(n) + \
( u"^" if s.requiresFullContext else u"")
if s.isAcceptState:
if s.predicates is not None:
return baseStateStr + u"=>" + str_list(s.predicates)
else:
return baseStateStr + u"=>" + unicode(s.prediction)
else:
return baseStateStr
class LexerDFASerializer(DFASerializer):
def __init__(self, dfa):
super(LexerDFASerializer, self).__init__(dfa, None)
def getEdgeLabel(self, i):
return u"'" + unichr(i) + u"'"
|
shalakhin/disqus-api
|
disqus_api/api.py
|
Python
|
mit
| 1,082
| 0.005545
|
import requests
import json
class DisqusAPI(object):
"""
Lightweight solution to make API calls to Disqus:
More info:
https://disqus.com/api/docs
"""
def __init__(self,
api_key,
api_secret,
version='3.0',
formats='json'
):
self.api_key = api_key
self.api_secret = api_secret
self.version = version
self.formats = formats
def get(self, method, **kwargs):
"""
Make get requests to retrieve data from Disqus
"""
endpoint = 'https://disqus.com/api/{version}/{method}.{formats}'
url = endpoint.format(
version=self.version,
method=method.replace('.', '/'),
formats=self.formats
)
kwargs.update({
'api_key': self.api_key,
'api_secret': self.api_secret,
})
response = requests.get(url, params=kwargs)
# TODO: support other formats like rss
if self.formats == 'json':
return json.loads(response.content.decode())
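# Usage sketch (not part of the original file; the key, secret and forum values
# are placeholders; 'forums.listThreads' is one of the documented Disqus methods
# that get() turns into a URL path):
#     api = DisqusAPI(api_key='PUBLIC_KEY', api_secret='SECRET_KEY')
#     threads = api.get('forums.listThreads', forum='my-forum')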
|
hideoussquid/aureus-12-bitcore
|
qa/rpc-tests/multi_rpc.py
|
Python
|
mit
| 4,609
| 0.005424
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (AureusTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to aureus.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "aureus.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + base64.b64encode(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
nuobit/odoo-addons
|
partner_default_journal/models/res_partner.py
|
Python
|
agpl-3.0
| 511
| 0.001957
|
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import fields, models
class ResPartner(models.Model):
_inherit = "res.partner"
sale_journal_id = fields.Many2one(
"account.journal", "Default journal", domain=[("type", "=", "sale")]
)
purchase_journal_id = fields.Many2one(
"account.journal", "Default journal", domain=[("type", "=", "purchase")]
)
|
nlhepler/pysam
|
tests/AlignedSegment_test.py
|
Python
|
mit
| 17,059
| 0.000117
|
import os
import pysam
import unittest
from TestUtils import checkFieldEqual
import copy
SAMTOOLS = "samtools"
WORKDIR = "pysam_test_work"
DATADIR = "pysam_data"
class ReadTest(unittest.TestCase):
def buildRead(self):
'''build an example read.'''
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))
a.next_reference_id = 0
a.next_reference_start = 200
a.template_length = 167
a.query_qualities = pysam.fromQualityString("1234") * 10
# todo: create tags
return a
class TestAlignedSegment(ReadTest):
'''tests to check if aligned read can be constructed
and manipulated.
'''
def testEmpty(self):
a = pysam.AlignedSegment()
self.assertEqual(a.query_name, None)
self.assertEqual(a.query_sequence, None)
self.assertEqual(pysam.toQualityString(a.query_qualities), None)
self.assertEqual(a.flag, 0)
self.assertEqual(a.reference_id, 0)
self.assertEqual(a.mapping_quality, 0)
self.assertEqual(a.cigartuples, None)
self.assertEqual(a.tags, [])
self.assertEqual(a.next_reference_id, 0)
self.assertEqual(a.next_reference_start, 0)
self.assertEqual(a.template_length, 0)
def testStrOfEmptyRead(self):
a = pysam.AlignedSegment()
s = str(a)
self.assertEqual(
"None\t0\t0\t0\t0\tNone\t0\t0\t0\tNone\tNone\t[]",
s)
def testSettingTagInEmptyRead(self):
'''see issue 62'''
a = pysam.AlignedSegment()
a.tags = (("NM", 1),)
a.query_qualities = None
self.assertEqual(a.tags, [("NM", 1), ])
def testCompare(self):
'''check comparison functions.'''
a = self.buildRead()
b = self.buildRead()
self.assertEqual(0, a.compare(b))
self.assertEqual(0, b.compare(a))
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
b.tid = 2
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
def testHashing(self):
a = self.buildRead()
b = self.buildRead()
self.assertEqual(hash(a), hash(b))
b.tid = 2
self.assertNotEqual(hash(a), hash(b))
def testUpdate(self):
'''check if updating fields affects other variable length data
'''
a = self.buildRead()
b = self.buildRead()
# check qname
b.query_name = "read_123"
checkFieldEqual(self, a, b, "query_name")
b.query_name = "read_12345678"
checkFieldEqual(self, a, b, "query_name")
b.query_name = "read_12345"
checkFieldEqual(self, a, b)
# check cigar
b.cigartuples = ((0, 10), )
checkFieldEqual(self, a, b, "cigartuples")
b.cigartuples = ((0, 10), (2, 1), (0, 10))
checkFieldEqual(self, a, b, "cigartuples")
b.cigartuples = ((0, 10), (2, 1), (0, 9), (1, 1), (0, 20))
checkFieldEqual(self, a, b)
# check seq
b.query_sequence = "ACGT"
checkFieldEqual(self,
a, b,
("query_sequence", "query_qualities", "query_length"))
b.query_sequence = "ACGT" * 3
checkFieldEqual(self,
a, b,
("query_sequence", "query_qualities", "query_length"))
b.query_sequence = "ACGT" * 10
checkFieldEqual(self, a, b, ("query_qualities",))
# reset qual
b = self.buildRead()
# check flags:
for x in (
"is_paired", "is_proper_pair",
"is_unmapped", "mate_is_unmapped",
"is_reverse", "mate_is_reverse",
"is_read1", "is_read2",
"is_secondary", "is_qcfail",
"is_duplicate", "is_supplementary"):
setattr(b, x, True)
self.assertEqual(getattr(b, x), True)
checkFieldEqual(self, a, b, ("flag", x,))
setattr(b, x, False)
self.assertEqual(getattr(b, x), False)
checkFieldEqual(self, a, b)
def testUpdate2(self):
'''issue 135: inplace update of sequence and quality score.
This does not work as setting the sequence will erase
the quality scores.
'''
a = self.buildRead()
a.query_sequence = a.query_sequence[5:10]
self.assertEqual(pysam.toQualityString(a.query_qualities), None)
a = self.buildRead()
s = pysam.toQualityString(a.query_qualities)
a.query_sequence = a.query_sequence[5:10]
a.query_qualities = pysam.fromQualityString(s[5:10])
self.assertEqual(pysam.toQualityString(a.query_qualities), s[5:10])
def testLargeRead(self):
'''build an example read.'''
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 200
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((0, 4 * 200), )
a.next_reference_id = 0
a.next_reference_start = 200
a.template_length = 167
a.query_qualities = pysam.fromQualityString("1234") * 200
return a
def testUpdateTlen(self):
'''check if updating tlen works'''
a = self.buildRead()
oldlen = a.template_length
oldlen *= 2
a.template_length = oldlen
self.assertEqual(a.template_length, oldlen)
def testPositions(self):
a = self.buildRead()
self.assertEqual(a.get_reference_positions(),
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59])
self.assertEqual(a.get_aligned_pairs(),
[(0, 20), (1, 21), (2, 22), (3, 23), (4, 24),
(5, 25), (6, 26), (7, 27), (8, 28), (9, 29),
(None, 30),
(10, 31), (11, 32), (12, 33), (13, 34), (14, 35),
(15, 36), (16, 37), (17, 38), (18, 39), (19, None),
(20, 40), (21, 41), (22, 42), (23, 43), (24, 44),
(25, 45), (26, 46), (27, 47), (28, 48), (29, 49),
(30, 50), (31, 51), (32, 52), (33, 53), (34, 54),
(35, 55), (36, 56), (37, 57), (38, 58), (39, 59)])
self.assertEqual(
a.get_reference_positions(),
[x[1] for x in a.get_aligned_pairs()
if x[0] is not None and x[1] is not None])
# alen is the length of the aligned read in genome
self.assertEqual(a.reference_length,
a.get_aligned_pairs()[-1][0] + 1)
# aend points to one beyond last aligned base in ref
self.assertEqual(a.get_reference_positions()[-1],
a.reference_end - 1)
def testFullReferencePositions(self):
'''see issue 26'''
a = self.buildRead()
a.cigar = [(4, 30), (0, 20), (1, 3), (0, 47)]
self.assertEqual(100,
len(a.get_reference_positions(full_length=True)))
def testBlocks(self):
a = self.buildRead()
self.assertEqual(a.get_blocks(),
[(20, 30), (31, 40), (40, 60)])
def test_get_aligned_pairs_soft_clipping(self):
a = pysam.AlignedSegment()
a.query_name = "read_12345"
a.query_sequence = "ACGT" * 10
a.flag = 0
a.reference_id = 0
a.reference_start = 20
a.mapping_quality = 20
a.cigartuples = ((4, 2), (0, 35), (4, 3))
a.query_qualities = pysam.fromQualityString("1234") * 10
self.ass
|
pclubuiet/website
|
home/views.py
|
Python
|
gpl-3.0
| 3,396
| 0.008539
|
from django import views
from django.shortcuts import render, get_object_or_404
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
from .models import *
from .forms import *
import requests
import http
from django.urls import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
class Template404(TemplateView):
template_name = "404.html"
class Home(TemplateView):
template_name = 'home/home.html'
class Topics(views.View):
def get(self, request, *args, **kwargs):
return render(request, "home/resources/topics.html", {'topics': Topic.objects.all()})
class Resources(views.View):
def get(self, request, pk, *args, **kwargs):
topic = get_object_or_404(Topic, pk=pk)
return render(request, "home/resou
|
rces/resources.html", {'resources': topic.resource_set.all(), 'topic' : topic})
class BlogPostList(views.View):
def get(self, request, *args, **kwargs):
posts = BlogPost.objects.all()
return render(request, "home/blog/index.html", {'posts': posts})
class BlogPostView(views.View):
def get(self, request, pk, *args, **kwargs):
post = get_object_or_404(BlogPost, pk=pk)
return render(request, "home/blog/blog_post.html", {'post': post})
class Leaderboard(views.View):
def get(self, request, *args, **kwargs):
users = Users.objects.all()
for user in users:
connected = False
while not connected:
try:
user_name = user.github_handle
response = requests.get('https://api.github.com/search/issues?sort=created&q=author:{}&type:pr&per_page=100'.format(user_name), verify = False).json()
pr_count = 0
print(response)
for obj in response['items']:
if('pull_request' in obj):
if('2018-09-30T00:00:00Z'<obj['created_at']<'2018-10-31T23:59:59Z'):
pr_count += 1
user.pr_count = pr_count
user.save()
connected = True
except:
pass
return render(request, 'home/leaderboard.html', {'users': users})
class RegisterUser(CreateView):
form_class = RegisterUserForm
template_name = "home/registeruser.html"
success_url = reverse_lazy('home:home')
@csrf_exempt
def GithubEmailCheck(request):
github_handle = request.POST.get('github_handle')
email = request.POST.get('email')
print("Received ", github_handle)
users = Users.objects.all()
for user in users:
if user.github_handle == github_handle:
return JsonResponse({'message' : 'Duplicate Github Handle'})
if user.email == email:
return JsonResponse({'message' : 'Duplicate Email'})
return JsonResponse({'message' : 'New'})
@csrf_exempt
def GithubCheck(request):
github_handle = request.POST.get('github_handle')
response = requests.get("https://api.github.com/users/{}".format(github_handle), verify = False).json()
print("https://api.github.com/users/{}".format(github_handle))
if ('login' in response):
print("Found")
return JsonResponse({'message' : 'Found'})
else:
return JsonResponse({'message' : 'Not Found'})
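# URL wiring sketch (hypothetical, not part of the original file; assumes
# Django 2-style path() routing in the project's urls.py):
#     from django.urls import path
#     urlpatterns = [
#         path('', Home.as_view(), name='home'),
#         path('topics/', Topics.as_view(), name='topics'),
#         path('topics/<int:pk>/', Resources.as_view(), name='resources'),
#         path('register/', RegisterUser.as_view(), name='register'),
#     ]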
|
SirDavidLudwig/KattisSolutions
|
problems/sidewayssorting/sidewayssorting.py
|
Python
|
gpl-3.0
| 372
| 0.024194
|
import sys
r, c = map(int, input().split())
while r and c:
lines = [input().strip() for i in range(r)]
rotatedLines = []
for i in range(c):
rotatedLines.append("".join([lines[j][i] for j in range(r)]))
rotatedLines.sort(key=lambda s: s.lower())
for i in range(r):
print("".join([rotatedLines[j][i] for j in range(c)]))
print()
r, c = map(int, input().split())
|
mbramr/My-Zork
|
room.py
|
Python
|
mit
| 1,035
| 0.017391
|
import json
import sqlite3
def get_room(id, dbfile):
ret = None
con = sqlite3.connect(dbfile)
for row in con.execute("select json from rooms where id=?",(id,)):
jsontext = row[0]
# Outputs the JSON response
#print("json = " + jsontext)
d = json.loads(jsontext)
d['id'] = id
ret = Room(**d)
break
con.close()
return ret
class Room():
def __init__(self, id=0, name="A room", description="An empty room", neighbors={}):
self.id = id
self.name = name
self.description = description
self.neighbors = neighbors
def _neighbor(self, direction):
if direction in self.neighbors:
return self.neighbors[direction]
else:
return None
def north(self):
return self._neighbor('n')
def south(self):
return self._neighbor('s')
def east(self):
return self._neighbor('e')
def west(self):
return self._neighbor('w')
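# Usage sketch (not part of the original file; the database path and room id are placeholders):
#     room = get_room(1, 'rooms.db')
#     if room is not None:
#         print(room.name + ': ' + room.description)
#         next_id = room.north()   # whatever is stored under 'n', or None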
|
mozilla/normandy
|
normandy/studies/migrations/0002_auto_20180510_2256.py
|
Python
|
mpl-2.0
| 272
| 0.003676
|
# Generated by Django 2.0.5 on 2018-05-10 22:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("studies", "0001_initial")]
operations = [migrations.AlterModelOptions(name="extension", options={"ordering": ("-id",)})]
|
feilaoda/FlickBoard
|
project/cache/files.py
|
Python
|
mit
| 3,576
| 0.007271
|
from markupsafe import escape
import re
from pymongo.objectid import ObjectId
from pymongo.errors import InvalidId
from app.people.people_model import People
from app.board.board_model import BoardTopic, BoardNode
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from lib.filter import none2string,mentions,video, urlink
from lib.utils import html_escape, br_escape
cache_opts = {
'cache.type': 'file',
'cache.data_dir': '/tmp/caches/data',
'cache.lock_dir': '/tmp/caches/lock',
'cache.regions': 'short_term, long_term',
#'cache.short_term.type': 'ext:memcached',
#'cache.short_term.url': '127.0.0.1.11211',
'cache.short_term.type': 'file',
'cache.short_term.expire': '1200',
'cache.long_term.type': 'file',
'cache.long_term.expire': '3600',
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
@cache.region('short_term', 'cached_people')
def get_cached_people(people_id):
try:
people = People.objects.with_id(people_id)
return people
except InvalidId, error:
pass
return None
def fetch_cached_people(people_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_people, None, 'cached_people', people_id)
return get_cached_people(people_id)
@cache.region('long_term', 'cached_board_topic')
def get_cached_board_topic(topic_id):
try:
topic = BoardTopic.objects.with_id(topic_id)
if topic is None:
return None
if topic.content:
topic.html_content = urlink(escape(topic.content)) #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
else:
topic.html_content = ''
return topic
except Exception, error:
return None
return None
def fetch_cached_board_topic(topic_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_topic, None, 'cached_board_topic', topic_id)
return get_cached_board_topic(topic_id)
@cache.region('long_term', 'cached_board_topic_morecontent')
def get_cached_board_topic_morecontent(topic_id):
try:
topic = fetch_cached_board_topic(topic_id)
if topic is None:
return None
html_more_content = ''
if topic.more_content:
html_more_content = br_escape(urlink(escape(topic.more_content))) #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
extra_content = ''
if topic.video_urls:
video_html = '<p></p>'
for url in topic.video_urls:
video_html += video(url)
extra_content = video_html
return html_more_content + extra_content
except Exception, error:
return None
return None
def fetch_cached_board_topic_morecontent(topic_id, reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_topic, None, 'cached_board_topic_morecontent', topic_id)
return get_cached_board_topic_morecontent(topic_id)
@cache.region('long_term', 'cached_board_nodelist')
def get_cached_board_nodelist(cache='board_nodelist'):
try:
nodelist = BoardNode.get_top_nodes()
return list(nodelist)
except InvalidId, error:
pass
return None
def fetch_cached_board_nodelist(reflush=False):
if reflush:
cache.region_invalidate(get_cached_board_nodelist, None, 'cached_board_nodelist', 'board_nodelist')
return get_cached_board_nodelist('board_nodelist')
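# Usage sketch (not part of the original file; the ObjectId strings are placeholders):
#     people = fetch_cached_people('507f1f77bcf86cd799439011')
#     topic = fetch_cached_board_topic('507f191e810c19729de860ea', reflush=True)
#     nodes = fetch_cached_board_nodelist()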
|
tensor-tang/Paddle
|
python/paddle/fluid/tests/unittests/test_program_code.py
|
Python
|
apache-2.0
| 2,769
| 0
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import signal
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers.io import ListenAndServ
from paddle.fluid.layers.io import Recv
from paddle.fluid.layers.io import Send
import paddle.fluid.layers.ops as ops
from paddle.fluid.transpiler.details import program_to_code
class TestProgram2Code(unittest.TestCase):
def test_print(self):
place = fluid.CPUPlace()
self.init_serv(place)
self.init_client(place, 9123)
def init_serv(self, place):
main = fluid.Program()
with fluid.program_guard(main):
serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False)
with serv.do():
out_var = main.global_block().create_var(
name="scale_0.tmp_0",
persistable=True,
dtype="float32",
shape=[32, 32])
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
ops._scale(x=x, scale=10.0, out=out_var)
program_to_code(main)
def init_client(self, place, port):
main = fluid.Program()
with fluid.program_guard(main):
x = layers.data(
shape=[32, 32],
dtype='float32',
name='X',
append_batch_size=False)
fluid.initializer.Constant(value=2.3)(x, main.global_block())
get_var = main.global_block().create_var(
name="scale_0.tmp_0", # server side var
dtype="float32",
persistable=False,
shape=[32, 32])
fluid.initializer.Constant(value=2.3)(get_var, main.global_block())
Send("127.0.0.1:%d" % port, [x])
o = Recv("127.0.0.1:%d" % port, [get_var])
program_to_code(main)
if __name__ == "__main__":
unittest.main()
|
pymal-developers/pymal
|
pymal/consts.py
|
Python
|
bsd-3-clause
| 496
| 0
|
__authors__ = ""
__copyright__ = "(c) 2014, pymal"
__license__ = "BSD License"
__contact__ = "Name Of Current Guardian of this file <email@address>"
USER_AGENT = 'api-indiv-0829BA2B33942A4A5E6338FE05EFB8A1'
HOST_NAME = "http://myanimelist.net"
DEBUG = False
RETRY_NUMBER = 4
RETRY_SLEEP = 1
SHORT_SITE_FORMAT_TIME = '%b %Y'
LONG_SITE_FORMAT_TIME = '%b %d, %Y'
MALAPPINFO_FORMAT_TIME = "%Y-%m-%d
|
"
MALAPPINFO_NONE_TIME = "0000-00-00"
MALAPI_FORMAT_TIME = "%Y%m%d"
MALAPI_NONE_TIME = "00000000"
|
inflector/atomspace
|
tests/cython/bindlink/test_bindlink.py
|
Python
|
agpl-3.0
| 7,002
| 0.001285
|
from unittest import TestCase
import os
from opencog.atomspace import AtomSpace, TruthValue, Atom, types
from opencog.bindlink import stub_bindlink, bindlink, single_bindlink,\
first_n_bindlink, af_bindlink, \
satisfaction_link, satisfying_set, \
satisfying_element, first_n_satisfying_set, \
execute_atom, evaluate_atom
from opencog.type_constructors import *
from opencog.utilities import initialize_opencog, finalize_opencog
from test_functions import green_count, red_count
__author__ = 'Curtis Faith'
class BindlinkTest(TestCase):
bindlink_atom = None
getlink_atom = None
atomspace = AtomSpace()
starting_size = 0
def setUp(self):
print "setUp - atomspace = ", self.atomspace
# Clear atoms from previous test
self.atomspace.clear()
# Initialize Python
initialize_opencog(self.atomspace)
set_type_ctor_atomspace(self.atomspace)
# Define several animals and something of a different type as well
InheritanceLink( ConceptNode("Frog"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Zebra"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Deer"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Spaceship"), ConceptNode("machine"))
# Define a graph search query
self.bindlink_atom = \
BindLink(
# The variable node to be grounded.
VariableNode("$var"),
# The pattern to be grounded.
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
),
# The grounding to be returned.
VariableNode("$var")
# bindlink needs a handle
)
# Define a pattern to be grounded
self.getlink_atom = \
GetLink(
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
)
)
# Remember the starting atomspace size.
self.starting_size = self.atomspace.size()
def tearDown(self):
print "tearDown - atomspace = ", self.atomspace
# Can't do this; finalize can be called only once, ever, and
# then never again. The second call will never follow through.
# Also, cannot create and delete atomspaces here; this will
# confuse the PythonEval singletonInstance.
# finalize_opencog()
# del self.atomspace
def test_stub_bindlink(self):
# Remember the starting atomspace size. This test should not
# change the atomspace.
starting_size = self.atomspace.size()
# Run bindlink.
atom = stub_bindlink(self.atomspace, self.bindlink_atom)
self.assertTrue(atom is not None)
# Check the ending atomspace size, it should be the same.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, starting_size)
def _check_result_setlink(self, atom, expected_arity):
# Check if the atom is a SetLink
self.assertTrue(atom is not None)
self.assertEquals(atom.type, types.SetLink)
# Check the ending atomspace size, it should have added one SetLink.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, self.starting_size + 1)
# The SetLink should have expected_arity items in it.
self.assertEquals(atom.arity, expected_arity)
def test_bindlink(self):
atom = bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 3)
def test_single_bindlink(self):
atom = single_bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_bindlink(self):
atom = first_n_bindlink(self.atomspace, self.bindlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_af_bindlink(self):
atom = af_bindlink(self.atomspace, self.bindlink_atom)
# The SetLink is empty. ??? Should it be.
self._check_result_setlink(atom, 0)
def test_satisfying_set(self):
atom = satisfying_set(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 3)
def test_satisfying_element(self):
atom = satisfying_element(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_satisfying_set(self):
atom = first_n_satisfying_set(self.atomspace, self.getlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_satisfy(self):
satisfaction_atom = SatisfactionLink(
VariableList(), # no variables
SequentialAndLink(
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("red light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("traffic ticket")
)
)
)
)
atom = satisfaction_link(self.atomspace, satisfaction_atom)
self.assertTrue(atom is not None and atom.mean <= 0.5)
self.assertEquals(green_count(), 2)
self.assertEquals(red_count(), 1)
def test_execute_atom(self):
result = execute_atom(self.atomspace,
ExecutionOutputLink(
GroundedSchemaNode("py: test_functions.add_link"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
list_link = ListLink(
ConceptNode("one"),
ConceptNode("two")
)
self.assertEquals(result, list_link)
def test_evaluate_atom(self):
result = evaluate_atom(self.atomspace,
EvaluationLink(
GroundedPredicateNode("py: test_functions.bogus_tv"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
self.assertEquals(result, TruthValue(0.6, 0.234))
|
pybursa/homeworks
|
i_pogorelko/hw4_i_pogorelko/hw4_solution1.py
|
Python
|
gpl-2.0
| 804
| 0.008706
|
#!/usr/bin/env python
# -*- coding: <encoding name> -*-
__author__ = "i_pogorelko"
__email__ = "i.pogorelko@gmail.com"
__date__ = "2014-11-16"
text='Proin eget tortor risus. Cras ultricies ligula sed magna dictum porta.\
Donec rutrum congue leo eget malesuada.'
def percentage_1(text):
print ''
print 'input: ', text
text = text.lower()
text2 = ''
for x in text:
if ord(x) >= ord('a') and ord(x) <= ord('z'):
text2 = text2 + x
d = {}
m = 0
for j in text2:
if d.has_key(j):
d[j] += 1.0
else:
d[j] = 1.0
m += 1
for key in d:
d[key] = float("%.1f" % ((d[key]/m)*100))
print '\noutput: ', d
return d
def percentage_2(text):
return percentage_1(text)
percentage_1(text)
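# Worked example of the rounding above (hypothetical input, not part of the original file):
#     percentage_1('aab')   # -> {'a': 66.7, 'b': 33.3}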
|
JournalMap/GeoParsers
|
pyparser_geoparser_testing.py
|
Python
|
gpl-2.0
| 3,064
| 0.011624
|
#parser_testing.py
import os, sys, re, StringIO
sys.path.append('/Users/Jason/Dropbox/JournalMap/scripts/GeoParsers')
#from jmap_geoparser_re import *
from jmap_geoparser import *
#def test_parsing():
test = "blah blah blah 45º 23' 12'', 123º 23' 56'' and blah blah blah 32º21'59''N, 115º 23' 14''W blah blah blah"
coords = coordinateParser.searchString(test)
for coord in coords:
assert coordinate(coord).calcDD(), "Coordinate Transform Error for "+str(coord)
test = "45.234º, 123.43º"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 45.234, 'longitude': 123.43}
test = "-45º 23' 12'', -123º 23' 56''"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.38667, 'longitude': 123.39889}
test = "32º21'59''N, 115º 23' 14''W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 32.36639, 'longitude': -115.38722}
test = "12 43 56 North, 23 56 12 East"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 12.73222, 'longitude': 23.93667}
test = "52 15 10N, 0 01 54W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.25278, 'longitude': -0.03167}
test = "52 35 31N, 1 28 05E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.59194, 'longitude': 1.46806}
test = "30° 47' N, 34° 46' E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 30.78333, 'longitude': 34.76667}
'''
test = "AT; 1 spm, CN 3-41, 21°00′ N, 112°30′ E"
for result, start, end in coordinateParser.scanString(test):
assert coordinate(result).calcDD() == {'latitude': 21.0, 'longitude': 112.5}
test = '27°43.886, 34°15.663'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 27.73143, 'longitude': 34.26105}
test = '49°17’13”N, 13°40’18”E'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 49.28694, 'longitude': 13.67167}
test = '45.9215º; -76.6219º'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.9215, 'longitude': 76.6219}
test = "latitude 32°47′47″ S and longitude 26°50′56″ E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -32.79639, 'longitude': 26.84889}
test = "N15°46′ W87°00'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 15.76667, 'longitude': -87.0}
test = "latitude of 35°13', longitude of 4°11'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 35.21667, 'longitude': 4.18333}
test = "expects to find coordinates: 52 degrees, 42 minutes north, 124 degrees, 50 minutes west"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.7, 'longitude': -124.83333}
# Should return an exception, but instead calculates latitude as 6º 10'
#test = "expects to find coordinates: 5°70'N, 73°46'W" # Minutes greater than 60
#test = "expects not to find: 4.5–5.0 "
'''
|
maraoz/proofofexistence
|
pycoin/serialize/__init__.py
|
Python
|
mit
| 200
| 0.01
|
import binascii
def b2h(the_bytes):
return binascii.hexlify(the_bytes).decode("utf8")
def b2h_rev(the_bytes):
return binascii.hexlify(bytearray(reversed(the_bytes))).decode("utf8")
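# Usage sketch (not part of the original file):
#     b2h(b'\x00\xff')       # -> '00ff'
#     b2h_rev(b'\x00\xff')   # -> 'ff00'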
|
Fansion/sharefun
|
sharefun/__init__.py
|
Python
|
mit
| 5,284
| 0.000593
|
# -*- coding: utf-8 -*-
__author__ = 'frank'
from flask import Flask, request, url_for, render_template, g, session, flash
from flask_wtf.csrf import CsrfProtect
from flask_debugtoolbar import DebugToolbarExtension
from flask.ext.login import LoginManager
from flask.ext.moment import Moment
from . import filters, permissions
from .utils import signout_user
from .config import load_config
config = load_config()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.signin'
login_manager.login_message = '请先登陆以获得相应操作权限'
# convert python's encoding to utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def register_login_manager(app):
"""注册login_manager"""
login_manager.init_app(app)
def register_jinja(app):
# inject vars into template context
@app.context_processor
def inject_vars():
from .models import Category
from datetime import date
year = date.today().strftime("%Y")
return dict(Permission=permissions.Permission, categories=Category.query.all(), year=year)
# url generator for pagination
def url_for_other_page(page):
"""Generate url for pagination"""
view_args = request.view_args.copy()
args = request.args.copy().to_dict()
combined_args = dict(view_args.items() + args.items())
combined_args['page'] = page
return url_for(request.endpoint, **combined_args)
# same effect
# args = request.args.copy()
# args['page'] = page
# return url_for(request.endpoint, **args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
def register_routes(app):
from .controllers import admin, site, user, auth
app.register_blueprint(site.bp, url_prefix='')
app.register_blueprint(admin.bp, url_prefix='/admin')
app.register_blueprint(user.bp, url_prefix='/user')
app.register_blueprint(auth.bp, url_prefix='/auth')
def register_error_handle(app):
@app.errorhandler(403)
def page_403(error):
return render_template('site/403.html'), 403
@app.errorhandler(404)
def page_404(error):
return render_template('site/404.html'), 404
@app.errorhandler(405)
def page_405(error):
return render_template('site/405.html'), 405
@app.errorhandler(500)
def page_500(error):
return render_template('site/500.html'), 500
def register_db(app):
from .models import db
db.init_app(app)
def register_mail(app):
from .utils import mail
mail.init_app(app)
def get_mail_handler():
import logging
from logging.handlers import SMTPHandler
credentials = (config.SMTP_USER, config.SMTP_PASSWORD)
mail_handler = SMTPHandler(config.SMTP_SERVER, config.SMTP_FROM,
config.SMTP_ADMIN, 'sf-log', credentials, ())
from logging import Formatter
mail_handler.setFormatter(Formatter('''
Message type: %(levelname)s
Location: %(pathname)s:%(lineno)d
Module: %(module)s
Function: %(funcName)s
Time: %(asctime)s
Message:
%(message)s
'''))
mail_handler.setLevel(logging.ERROR)
return mail_handler
def register_logger(app):
"""send error log to admin by smtp"""
if not app.debug:
app.logger.addHandler(get_mail_handler())
def register_moment(app):
moment = Moment(app)
def get_current_user():
"""获取当前user,同时进行session有效性的检测
放在utils.py会造成环路引用
"""
# 对应utils.py中signin_user方法
# 豆瓣登陆则验证邮箱, 非豆瓣登陆不验证邮箱直接返回空值退出
if 'signin_method' not in session:
return None
if 'user_id' not in session:
return None
# else:
# for k,v in session.iteritems():
# print k,v
from .models import User
# Note: this is user_id, not douban_id
user = User.query.filter(User.id == session['user_id']).first()
if not user:
signout_user()
return None
return user
def create_app():
app = Flask(__name__)
app.config.from_object(config)
# CSRF protect
CsrfProtect(app)
if app.debug:
DebugToolbarExtension(app)
register_jinja(app)
register_routes(app)
register_error_handle(app)
register_db(app)
register_logger(app)
register_login_manager(app)
register_moment(app)
register_mail(app)
app.jinja_env.filters['markdown'] = filters.markdown
app.jinja_env.filters['normalize'] = filters.normalize
app.jinja_env.filters[
'engrolename_chinrolename'] = filters.engrolename_chinrolename
app.jinja_env.filters['ismyself'] = filters.ismyself
@app.before_request
def before_request():
g.user = get_current_user()
if g.user:
if not g.user.is_activated:
flash('账户尚未激活,请先登陆' + g.user.email + '查收验证邮件并激活账户')
signout_user()
if g.user.is_banned:
flash('账户已被禁用, 请联系管理员')
signout_user()
return app
app = create_app()
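# Run sketch (not part of the original file; host and port are placeholders for a local development server):
#     if __name__ == '__main__':
#         app.run(host='127.0.0.1', port=5000, debug=True)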
|
dhcrzf/zulip
|
zerver/views/muting.py
|
Python
|
apache-2.0
| 2,792
| 0.005372
|
from django.http import HttpResponse, HttpRequest
from typing import Optional
import ujson
from django.utils.translation import ugettext as _
from zerver.lib.actions import do_mute_topic, do_unmute_topic
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.lib.topic_mutes import topic_is_muted
from zerver.lib.streams import (
access_stream_by_id,
access_stream_by_name,
access_stream_for_unmute_topic_by_id,
access_stream_for_unmute_topic_by_name,
check_for_exactly_one_stream_arg,
)
from zerver.lib.validator import check_int
from zerver.models import get_stream, Stream, UserProfile
def mute_topic(user_profile: UserProfile,
stream_id: Optional[int],
stream_name: Optional[str],
topic_name: str) -> HttpResponse:
if stream_name is not None:
(stream, recipient, sub) = access_stream_by_name(user_profile, stream_name)
else:
assert stream_id is not None
(stream, recipient, sub) = access_stream_by_id(user_profile, stream_id)
if topic_is_muted(user_profile, stream.id, topic_name):
return json_error(_("Topic already muted"))
do_mute_topic(user_profile, stream, recipient, topic_name)
return json_success()
def unmute_topic(user_profile: UserProfile,
stream_id: Optional[int],
stream_name: Optional[str],
topic_name: str) -> HttpResponse:
error = _("Topic is not muted")
if stream_name is not None:
stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)
else:
assert stream_id is not None
stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)
if not topic_is_muted(user_profile, stream.id, topic_name):
return json_error(error)
do_unmute_topic(user_profile, stream, topic_name)
return json_success()
@has_request_variables
def update_muted_topic(request: HttpRequest,
user_profile: UserProfile,
stream_id: Optional[int]=REQ(validator=check_int, default=None),
stream: Optional[str]=REQ(default=None),
topic: str=REQ(),
op: str=REQ()) -> HttpResponse:
check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)
if op == 'add':
return mute_topic(
user_profile=user_profile,
stream_id=stream_id,
stream_name=stream,
topic_name=topic,
)
elif op == 'remove':
return unmute_topic(
user_profile=user_profile,
stream_id=stream_id,
stream_name=stream,
topic_name=topic,
)
|
woltage/ansible
|
lib/ansible/cli/__init__.py
|
Python
|
gpl-3.0
| 20,703
| 0.007149
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import operator
import optparse
import os
import sys
import time
import yaml
import re
import getpass
import subprocess
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes
from ansible.utils.display import Display
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
#FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
class CLI(object):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = ['No Actions']
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
def __init__(self, args, display=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
if display is None:
self.display = Display()
else:
self.display = display
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0,len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
def parse(self):
raise Exception("Need to implement!")
def run(self):
if self.options.verbosity > 0:
self.display.display("Using %s as config file" % C.CONFIG_FILE)
@staticmethod
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
''' prompt for vault password and/or password change '''
vault_pass = None
new_vault_pass = None
try:
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
raise errors.AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise errors.AnsibleError("Passwords do not match")
except EOFError:
pass
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
return vault_pass, new_vault_pass
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % op.become_method.upper()
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
def validate_conflicts(self, vault_opts=False, runas_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_file):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if (op.su or op.su_user or op.ask_su_pass) and \
(op.sudo or op.sudo_user or op.ask_sudo_pass) or \
(op.su or op.su_user or op.ask_su_pass) and \
(op.become or op.become_user or op.become_ask_pass) or \
(op.sudo or op.sudo_user or op.ask_sudo_pass) and \
(op.become or op.become_user or op.become_ask_pass):
            self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
                              "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
" are exclusive of each other")
@staticmethod
def expand_tilde(option, opt, value, parser):
setattr(parser.values, option.dest, os.path.expanduser(value))
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None, fork_opts=False):
''' create an options parser for most ansible scripts '''
        #FIXME: implement epilog parsing
#OptionParser.format_epilog = lambda self, fo
|
|
dotKom/onlineweb4
|
apps/events/tests/all_tests.py
|
Python
|
mit
| 3,574
| 0.003637
|
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.test import TestCase, override_settings
from django.utils import timezone
from django_dynamic_fixture import G
from apps.events.models import AttendanceEvent, Event
class EventOrderedByRegistrationTestCase(TestCase):
def setUp(self):
self.FEATURED_TIMEDELTA_SETTINGS = settings
# Override settings so that the tests will work even if we update the default delta
self.FEATURED_TIMEDELTA_SETTINGS.OW4_SETTINGS['events']['OW4_EVENTS_FEATURED_DAYS_FUTURE'] = 7
self.FEATURED_TIMEDELTA_SETTINGS.OW4_SETTINGS['events']['OW4_EVENTS_FEATURED_DAYS_PAST'] = 7
def test_registration_no_push_forward(self):
"""
Tests that an AttendanceEvent with registration date far in the future is sorted by its event end date,
like any other event.
"""
today = timezone.now()
month_ahead = today + datetime.timedelta(days=30)
month_ahead_plus_five = month_ahead + datetime.timedelta(days=5)
normal_event = G(Event, event_start=month_ahead, event_end=month_ahead)
pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five)
G(AttendanceEvent, registration_start=month_ahead_plus_five, registration_end=month_ahead_plus_five,
event=pushed_event)
expected_order = [normal_event, pushed_event]
with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS):
self.assertEqual(list(Event.by_registration.all()), expected_order)
def test_registration_start_pushed_forward(self):
"""
Tests that an AttendanceEvent with registration date within the "featured delta" (+/- 7 days from today)
will be pushed ahead in the event list, thus sorted by registration start rather than event end.
"""
today = timezone.now()
three_days_ahead = today + datetime.timedelta(days=3)
month_ahead = today + datetime.timedelta(days=30)
month_ahead_plus_five = month_ahead + datetime.timedelta(days=5)
normal_event = G(Event, event_start=month_ahead, event_end=month_ahead)
pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five)
G(AttendanceEvent, registration_start=three_days_ahead, registration_end=three_days_ahead, event=pushed_event)
expected_order = [pushed_event, normal_event]
with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS):
self.assertEqual(list(Event.by_registration.all()), expected_order)
def test_registration_past_push_forward(self):
"""
Tests that an AttendanceEvent with a registration date in the past, outside the "featured delta" (+/- 7 days)
will be sorted by the event's end date.
"""
today = timezone.now()
        month_ahead = today + datetime.timedelta(days=30)
month_ahead_plus_five = month_ahead + datetime.timedelta(days=5)
month_back = today - datetime.timedelta(days=30)
normal_event = G(Event, event_start=month_ahead, event_end=month_ahead)
pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five)
        G(AttendanceEvent, registration_start=month_back, registration_end=month_back, event=pushed_event)
expected_order = [normal_event, pushed_event]
with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS):
self.assertEqual(list(Event.by_registration.all()), expected_order)
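The three tests above encode the ordering rule: events normally sort by their end date, but an attendance event whose registration opens within the featured window (+/- 7 days from today) is pulled forward and sorted by its registration start instead. The Event.by_registration manager itself is not shown in this record, so the following plain-Python sort key is only an illustrative sketch of that rule; FEATURED_DAYS, registration_sort_key, and the sample tuples are assumptions, not project code.

import datetime

FEATURED_DAYS = 7  # mirrors the OW4_EVENTS_FEATURED_DAYS_* overrides in setUp above

def registration_sort_key(event_end, registration_start, today):
    """Sort by registration start inside the featured window, else by event end."""
    if registration_start is not None:
        if abs((registration_start - today).days) <= FEATURED_DAYS:
            return registration_start
    return event_end

today = datetime.datetime(2023, 1, 1)
events = [
    ("normal", datetime.datetime(2023, 1, 31), None),                          # no registration
    ("pushed", datetime.datetime(2023, 2, 5), datetime.datetime(2023, 1, 4)),  # opens in 3 days
]
events.sort(key=lambda e: registration_sort_key(e[1], e[2], today))
print([name for name, _, _ in events])  # ['pushed', 'normal']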
|
nicholasserra/sentry
|
src/sentry/web/frontend/create_project.py
|
Python
|
bsd-3-clause
| 2,910
| 0.002062
|
from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from sentry.models import Project, Team
from sentry.web.forms.add_project import AddProjectForm
from sentry.web.frontend.base import OrganizationView
from sentry.utils.http import absolute_uri
ERR_NO_TEAMS = 'You cannot create a new project because there are no teams to assign it to.'
class AddProjectWithTeamForm(AddProjectForm):
team = forms.ChoiceField(
choices=(), required=True,
help_text='The team controls who has access to this project.',
)
class Meta:
fields = ('name', 'team')
        model = Project
def __init__(self, user, team_list, *args, **kwargs):
super(AddProjectWithTeamForm, self).__init__(*args, **kwargs)
self.team_list = team_list
if len(self.team_list) == 1:
del self.fields['team']
else:
            self.fields['team'].choices = (
(t.slug, t.name)
for t in team_list
)
self.fields['team'].widget.choices = self.fields['team'].choices
def clean_team(self):
value = self.cleaned_data['team']
for team in self.team_list:
if value == team.slug:
return team
return None
def save(self, actor, ip_address):
team = self.cleaned_data.get('team', self.team_list[0])
return super(AddProjectWithTeamForm, self).save(actor, team, ip_address)
class CreateProjectView(OrganizationView):
# TODO(dcramer): I'm 95% certain the access is incorrect here as it would
# be probably validating against global org access, and all we care about is
# team admin
required_scope = 'team:write'
def get_form(self, request, organization, team_list):
return AddProjectWithTeamForm(request.user, team_list, request.POST or None, initial={
'team': request.GET.get('team'),
})
def handle(self, request, organization):
team_list = [
t for t in Team.objects.get_for_user(
organization=organization,
user=request.user,
)
if request.access.has_team_scope(t, self.required_scope)
]
if not team_list:
messages.error(request, ERR_NO_TEAMS)
return self.redirect(reverse('sentry-organization-home', args=[organization.slug]))
form = self.get_form(request, organization, team_list)
if form.is_valid():
project = form.save(request.user, request.META['REMOTE_ADDR'])
return self.redirect(absolute_uri('/{}/{}/settings/install/'.format(
organization.slug,
project.slug,
)))
context = {
'form': form,
}
return self.respond('sentry/create-project.html', context)
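One subtle point in AddProjectWithTeamForm above: when the user has exactly one team the 'team' field is deleted, so cleaned_data never contains a 'team' key and save() falls back to self.team_list[0]. A minimal stand-in with plain dicts (not Django form objects; the names below are invented) shows that fallback:

team_list = ["backend-team"]            # stand-in for the user's single Team
cleaned_data = {"name": "My Project"}   # no 'team' key because the field was removed
team = cleaned_data.get("team", team_list[0])
print(team)  # backend-team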
|
tpainter/df_everywhere
|
df_everywhere/test/sdltest/StackOverflow_Question_Code.py
|
Python
|
gpl-2.0
| 1,568
| 0.017857
|
import os
os.environ["PYSDL2_DLL_PATH"] = os.getcwd()
import sdl2
import win32gui
def get_windows_bytitle(title_text, exact = False):
"""
Gets window by title text. [Windows Only]
"""
def _window_callback(hwnd, all_windows):
all_windows.append((hwnd, win32gui.GetWindowText(hwnd)))
windows = []
win32gui.EnumWindows(_window_callback, windows)
if exact:
return [hwnd for hwnd, title in windows if title_text == title]
else:
return [hwnd for hwnd, title in windows if title_text in title]
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)
#This will return a handle to an open 'Notepad.exe' window.
window_handle = get_windows_bytitle("Untitled", False)
#Create a window so that the hint below can be set
a = sdl2.SDL_CreateWindow("test window", sdl2.SDL_WINDOWPOS_UNDEFINED,sdl2.SDL_WINDOWPOS_UNDEFINED, 200,200, 0 )
#Set hint as recommended by SDL documentation: https://wiki.libsdl.org/SDL_CreateWindowFrom#Remarks
result = sdl2.SDL_SetHint(sdl2.SDL_HINT_VIDEO_WINDOW_SHARE_PIXEL_FORMAT, hex(id(a)))
print(sdl2.SDL_GetError())
np_window = sdl2.SDL_CreateWindowFrom(window_handle[0])
print(sdl2.SDL_GetError())
np_sur = sdl2.SDL_GetWindowSurface(np_window)
print(sdl2.SDL_GetError())
save_sur = sdl2.SDL_CreateRGBSurface(0,np_sur[0].w,np_sur[0].h,32,0,0,0,0)
print(sdl2.SDL_GetError())
r = sdl2.SDL_BlitSurface(np_sur, None, save_sur, None)
print(sdl2.SDL_GetError())
result = sdl2.SDL_SaveBMP(save_sur,'test.bmp')
print(sdl2.SDL_GetError())
sdl2.SDL_FreeSurface(save_sur)
print(sdl2.SDL_GetError())
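A small defensive sketch for the snippet above: get_windows_bytitle returns a possibly empty list, while the question code indexes window_handle[0] unconditionally. Guarding the lookup before handing the handle to SDL_CreateWindowFrom might look like this (the error message is an assumption):

handles = get_windows_bytitle("Untitled", exact=False)
if not handles:
    raise RuntimeError("No window with a matching title was found")
target_handle = handles[0]  # safe to pass to sdl2.SDL_CreateWindowFrom(target_handle)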
|
ArioX/tools
|
shodan.py
|
Python
|
gpl-2.0
| 9,697
| 0.028359
|
#!/usr/bin/python
# Exploit toolkit using shodan module for search exploit & host lookup
# Code : by jimmyromanticdevil
#
# Download :
# Before you run this code you must install shodan lib.
# $ wget [url]http://pypi.python.org/packages/source/s/shodan/shodan-0.4.tar.gz[/url]
# $ tar xvf shodan-0.2.tar.gz
# $ cd shodan-0.2/
# $ sudo python setup.py install
# Api key request:
# See in here : [url]http://www.shodanhq.com/api_doc[/url]
# Rules of shodan :
# 1. Don't make more than 1 query per second.
# 2. Be respectful when using the API, I don't have a lot of resources to work with.
# So users might want to get their own key (have to register on shodan's website).
# Plus all the requests go through shodan servers which might make it pretty slow if many people are using the service.
#
# Special thanks :
# thanks person :5ynl0rd,kiddies aka peneter,ne0 d4rk fl00der,oghie,parc0mx,me0nkz,suryal0e,zee_eichel
# mirwan aka cassaprogy,shadow_maker,suddent_death,aip,r3d3,dawflin,n1nj4,hakz,
# leXel,s3my0n,MaXe,Andre Corleone ,Shamus,and all my friend .
# thanks communty : Tecon-crew<[url]http://tecon-crew.org[/url]>
# Void-labs <[url]http://void-labs.org[/url]>
# Makassar ethical hacker<[url]http://makassarhacker.com/>[/url]
# Intern0t <[url]http://forum.intern0t.net/>[/url]
# Deadc0de <[url]http://forum.deadc0de.or.id/>[/url]
#-----------------------------------------------
import shodan,sys,time,base64,os
from time import sleep
from shodan import WebAPI
__author__='amltbXlyb21hbnRpY2Rldmls'
__email__ ='PHJvbWFudGljZGV2aWwuamltbXlAZ21haWwuY29tPg=='
__api__ ='Z4xjUqqsaQbFgYrnn3EBuoJsSC0VZTyI'#request youre api key and paste in here
_lolz_ = WebAPI(__api__)
def tayping(title):
try:
for i in title:
print "\b%s"%i,
sys.stdout.flush()
time.sleep(0.005)
except ImportError:
print "Some Error",
def check():
try:
checking = "[C]Checking module..."
tayping(checking)
sleep(2)
import shodan
except ImportError:
error ="\n[!]You must install Shodan Module in here :\n[url]http://pypi.python.org/packages/source/s/shodan/...[/url]"
        tayping(error)
except KeyboardInterrupt:
print "\n[*]Exiting program...\n"
sys.exit(1)
else :
succes="\n[*]Shodan module is available..."
tayping(succes)
sleep(2)
try:
api_check="\n[C]Checking Api key.."
tayping(api_check)
sleep(2)
check_Api = len(__api__)
if check_Api==0:
error_api= "\n[!] Api key is not available\n[!]You must request Api key in here :[url]http://www.shodanhq.com/api_doc[/url]\n\n\n\n"
tayping(error_api)
sleep(2)
elif check_Api != 0:
succces = "\n[*]Api key is available\n\n\n\n"
tayping(succces)
sleep(3)
except KeyboardInterrupt:
print "\n[*] Exiting program...\n"
sys.exit(0)
def clear():
if sys.platform in ('linux-i386', 'linux2', 'darwin'):
SysCls = 'clear'
elif sys.platform == 'win32' or sys.platform == 'dos' or sys.platform[0:5] == 'ms-dos':
SysCls = 'cls'
else:
SysCls = 'unknown'
return SysCls
def title():
__Auth__= base64.b64decode(__author__)
__Eml__ = base64.b64decode(__email__)
title='''
//////////////////////////////////////////////////////
___________ .__ .__ __
\_ _____/___ _________ | | ____ |__|_/ |_
| __)_ \ \/ /\____ \ | | / _ \ | |\ __\\
| \ > < | |_> >| |__( <_> )| | | |
/_______ //__/\_ \| __/ |____/ \____/ |__| |__|
\/ \/|__|/
Toolkit
Coder by : %s
Contach : %s
//////////////////////////////////////////////////////
'''%(__Auth__,__Eml__)
tayping(title)
def expoitdb():
try:
searching_Exploit= raw_input('[+]Search a exploit :')
print '[!]You search [%s] Exploit'% searching_Exploit
wtf = _lolz_.exploitdb.search(searching_Exploit)
        more = wtf['total']
print '[!]Found [%s] exploit with result [%s]'%(more,searching_Exploit)
try:
display =raw_input('[!]See all list exploit found?(y/n)')
if display =='y':
ds = wtf['matches']
for i in ds :
print'%s: %s' % (i['id'],i['description'])
except Exception,err:
print'[%s]'%err
try:
display_exploit=raw_input('[+]Select type exploit?(y/n)')
if display_exploit =='y':
print'choois types : remote, webapps, dos, local, shellcode ?'
rock =raw_input('')
print 'youre chooise [%s] exploits'%rock
r = wtf['matches']
for i in r:
if rock ==i['type']:
print'%s: %s' % (i['id'],i['description'])
except Exception,err:
print'[%s]'%err
try:
view_exploit=raw_input('[+]Select exploit to view ?(y/n)')
if view_exploit =='y':
print'[+]Enter exploit id to view :'
v = raw_input('')
lols=wtf['matches']
for i in lols:
if v == str(i['id']):
File_exploit = _lolz_.exploitdb.download(i['id'])
print 'Filename: %s'% File_exploit['filename']
print 'Content-type: %s' % File_exploit['content-type']
print File_exploit['data']
download_exploit= raw_input('[+]download the exploit ?(y/n)')
if download_exploit=='y':
dwnload = open(File_exploit['filename'], 'w')
dwnload.write(File_exploit['data'])
dwnload.close()
print'%s successfully download' % File_exploit['filename']
except Exception,err:
print'[%s]'%err
try_again=raw_input('[+]Do you want to try again ?(y/n):')
while try_again=='y':
os.system(clear())
title()
expoitdb()
try_again=raw_input('[+]Do you want to try again ?(y/n):')
main()
except KeyboardInterrupt, IOError:
print '\nYou pressed Ctrl+C or exited...'
main()
sys.exit(1)
def metasploit():
try:
module_search=raw_input('[!]Search module metasploit :')
print'[!]We will search metasploit module'
m_m = _lolz_.msf.search(module_search)
result = m_m['total']
print 'Modules found: %s'%result
result2 = m_m['matches']
for i in result2:
print '%s: %s' % (i['type'], i['name'])
download =raw_input('[+]Download module : (y/n)')
if download =='y':
file = _lolz_.msf.download(i['fullname'])
print 'Filename: %s' % file['filename']
print 'Content-type: %s' % file['content-type']
print file['data']
try_again = raw_input('[+]Do you want to try again ?(y/n)')
while try_again =='y':
os.system(clear())
title()
metasploit()
try_again = raw_input('[+]Do you want to try again ?(y/n)')
main()
except Exception,err:
print'[%s]'%err
def host():
try:
input_host = raw_input('[+]Input host :')
host_result = _lolz_.host(input_host)
ip =host_result['ip']
country=host_result.get('country', None)
city =host_result.get('city', None)
host_name =host_result['hostnames']
data =host_result['data']
resulting ="""
Ip addres = %s
Country = %s
City = %s
"""%(ip,country,city,)
tayping(
|
jonathanlurie/BLANK_PY2WX
|
src/main.py
|
Python
|
mit
| 492
| 0.006098
|
# importing wxPython library, see the reference here :
# http://www.wxpython.org/docs/api/wx-module.html
# and an excelent step by step tutorial there :
# http://zetcode.com/wxpython
import wx
from Controller import *
# main function
def main():
# each wx application must have a wx.App object
    app = wx.App()
controller = Controller(title = "BLANK_PY2WX")
# entering the endless loop that catches all the events
app.MainLoop()
if __name__ == '__main__':
main()
|
haimich/knick-knack
|
doc/folderstructure/v3.2/ .knick-knack/python-fabric/files/setup.py
|
Python
|
mit
| 309
| 0.02589
|
import sys
from fabric.api import *
from fabric.contrib import *
from fabric.contrib.project import rsync_project
from defaults import fab
from config import ssh, sudoers
import {%= name %}
@task
def prepare_vm():
sudoers.setup_sudoers_on_vm()
@task(default=True)
def system():
    print 'start here'
|
ryfx/modrana
|
modules/_mod_clickMenu.py
|
Python
|
gpl-3.0
| 2,761
| 0.002898
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Menu for quickly adding waypoints when on move
#----------------------------------------------------------------------------
# Copyright 2007-2008, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRAN
|
TY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from modules.base_module import RanaModule
import cairo
from time import time
from math import pi
def getModule(*args, **kwargs):
return ClickMenu(*args, **kwargs)
class ClickMenu(RanaModule):
"""Overlay info on the map"""
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
self.lastWaypoint = "(none)"
self.lastWaypointAddTime = 0
self.messageLingerTime = 2
def handleMessage(self, message, messageType, args):
if message == "addWaypoint":
m = self.m.get("waypoints", None)
if m is not None:
self.lastWaypoint = m.newWaypoint()
self.lastWaypointAddTime = time()
def drawMapOverlay(self, cr):
"""Draw an overlay on top of the map, showing various information
about position etc."""
# waypoins will be done in another way, so this is disabled for the time being
# (x,y,w,h) = self.get('viewport')
#
# dt = time() - self.lastWaypointAddTime
# if(dt > 0 and dt < self.messageLingerTime):
# self.drawNewWaypoint(cr, x+0.5*w, y+0.5*h, w*0.3)
# else:
# m = self.m.get('clickHandler', None)
# if(m != None):
# m.registerXYWH(x+0.25*w,y+0.25*h,w*0.5,h*0.5, "clickMenu:addWaypoint")
def drawNewWaypoint(self, cr, x, y, size):
text = self.lastWaypoint
cr.set_font_size(200)
extents = cr.text_extents(text)
(w, h) = (extents[2], extents[3])
cr.set_source_rgb(0, 0, 0.5)
cr.arc(x, y, size, 0, 2 * pi)
cr.fill()
x1 = x - 0.5 * w
y1 = y + 0.5 * h
border = 20
cr.set_source_rgb(1, 1, 1)
cr.move_to(x1, y1)
cr.show_text(text)
cr.fill()
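A hedged usage sketch for drawNewWaypoint above: modRana normally hands the module a cairo context from its map rendering loop, so the standalone surface setup below is an assumption made purely for illustration.

import cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 800, 480)
ctx = cairo.Context(surface)
# A ClickMenu instance is normally built by modRana's module loader through
# getModule(); once constructed, the overlay call would receive ctx like this:
# click_menu.drawNewWaypoint(ctx, x=400, y=240, size=120)
surface.write_to_png("waypoint_overlay.png")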
|
havard024/prego
|
venv/lib/python2.7/site-packages/django/db/models/base.py
|
Python
|
mit
| 44,041
| 0.002021
|
from __future__ import unicode_literals
import copy
import sys
from functools import update_wrapper
from django.utils.six.moves import zip
import django.db.models.manager  # Imported to register signal handler.
from django.conf import settings
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db import (router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.text import get_text_list, capfirst
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# six.with_metaclass() inserts an extra class called 'NewBase' in the
# inheritance tree: Model -> NewBase -> object. But the initialization
# should be executed only once for a given model class.
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
return super_new(cls, name, bases, attrs)
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase) and
not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist', subclass_exception(str('DoesNotExist'),
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,),
module, attached_to=new_class))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception(str('MultipleObjectsReturned'),
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,),
module, attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
|
ericpre/hyperspy
|
hyperspy/io_plugins/sur.py
|
Python
|
gpl-3.0
| 49,142
| 0.01691
|
# -*- coding: utf-8 -*-
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# Plugin to read the mountainsmap surface format (sur)
#Current state can bring support to the surface format if the file is an
#attolight hyperspectral map, but cannot bring write nor support for other
#mountainsmap files (.pro etc.). I need to write some tests, check whether the
#comments can be systematically parsed into metadata and write a support for
#original_metadata or other
import logging
#Dateutil allows to parse date but I don't think it's useful here
#import dateutil.parser
import numpy as np
#Commented for now because I don't know what purpose it serves
#import traits.api as t
from copy import deepcopy
import struct
import sys
import zlib
import os
import warnings
#Maybe later we can implement reading the class with the io utils tools instead
#of re-defining read functions in the class
#import hyperspy.misc.io.utils_readfile as iou
#This module will prove useful when we write the export function
#import hyperspy.misc.io.tools
#DictionaryTreeBrowser class handles the fancy metadata dictionnaries
#from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.exceptions import MountainsMapFileError
_logger = logging.getLogger(__name__)
# Plugin characteristics
# ----------------------
format_name = 'Digital Surf Surface'
description = """Read data from the proprietary .sur file format from Digital
Surf. Allows hyperspy to interact with the mountains map software"""
full_support = False #Check with the boys once this is done
# Recognised file extension
file_extensions = ('sur', 'SUR','pro','PRO')
default_extension = 0
# Writing features
writes = False #First we will check with the load
non_uniform_axis = False
# ----------------------
class DigitalSurfHandler(object):
""" Class to read Digital Surf MountainsMap files.
Attributes
----------
filename, signal_dict, _work_dict, _list_sur_file_content, _Object_type,
_N_data_object, _N_data_channels, _initialized
Methods
-------
parse_file, parse_header, get_image_dictionaries
Class Variables
---------------
_object_type : dict key: int containing the mountainsmap object types
"""
#Object types
_mountains_object_types = {
-1: "_ERROR" ,
0: "_UNKNOWN" ,
1: "_PROFILE" ,
2: "_SURFACE" ,
3: "_BINARYIMAGE" ,
4: "_PROFILESERIE" ,
5: "_SURFACESERIE" ,
6: "_MERIDIANDISC" ,
7: "_MULTILAYERPROFILE" ,
8: "_MULTILAYERSURFACE" ,
9: "_PARALLELDISC" ,
10: "_INTENSITYIMAGE" ,
11: "_INTENSITYSURFACE" ,
12: "_RGBIMAGE" ,
13: "_RGBSURFACE" ,
14: "_FORCECURVE" ,
15: "_SERIEOFFORCECURVE" ,
16: "_RGBINTENSITYSURFACE",
20: "_SPECTRUM" ,
21: "_HYPCARD" ,
}
def __init__(self, filename=None):
#We do not need to check for file existence here because
#io module implements it in the load function
self.filename = filename
#The signal_dict dictionnary has to be returned by the
#file_reader function. Apparently original_metadata needs
#to be set
self.signal_dict = {'data': np.empty((0,0,0)),
'axes': [],
'metadata': {},
'original_metadata': {}
}
#Dictionary to store, read and write fields in the binary file
#defined in the MountainsMap SDK. Structure is
# _work_dict['Field']['value'] : access field value
# _work_dict['Field']['b_unpack_fn'](f) : unpack value from file f
# _work_dict['Field']['b_pack_fn'](f,v): pack value v in file f
self._work_dict = \
{
"_01_Signature":
{
'value':"DSCOMPRESSED",
'b_unpack_fn': lambda f: self._get_str(f,12,"DSCOMPRESSED"),
'b_pack_fn': lambda f,v: self._set_str(f,v,12),
},
"_02_Format":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_03_Number_of_Objects":
{
'value':1,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_04_Version":
{
'value':1,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_05_Object_Type":
{
'value':2,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_06_Object_Name":
{
'value':"",
'b_unpack_fn': lambda f: self._get_str(f,30,"DOSONLY"),
'b_pack_fn': lambda f,v: self._set_str(f,v,30),
},
"_07_Operator_Name":
{
'value':"",
'b_unpack_fn': lambda f: self._get_str(f,30,""),
'b_pack_fn': lambda f,v: self._set_str(f,v,30),
},
"_08_P_Size":
{
'value':1,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_09_Acquisition_Type":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_10_Range_Type":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_11_Special_Points":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_12_Absolute":
{
'value':0,
'b_unpack_fn': self._get_int16,
'b_pack_fn': self._set_int16,
},
"_13_Gauge_Resolution":
{
'value':0.0,
'b_unpack_fn': self._get_float,
'b_pack_fn': self._set_float,
},
"_14_W_Size":
{
            'value':0,
            'b_unpack_fn': self._get_int32,
'b_pack_fn': self._set_int32,
},
"_15_Size_of_Points":
{
'value':16,
'b_unpack_fn':lambda f: self._get_int16(f,32),
'b_pack_fn': self._set_int16,
},
"_16_Zmin":
{
            'value':0,
'b_unpack_fn':self._get_int32,
'b_pack_
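(The record above is cut off by the viewer.) The _work_dict comment near the top of __init__ describes the field-descriptor pattern: each entry stores a value plus unpack/pack callables bound to a binary stream. The sketch below illustrates that pattern in isolation; the two field names and the little-endian layout are invented for the example and are not the real .sur header.

import struct
from io import BytesIO

def get_int16(f):
    return struct.unpack('<h', f.read(2))[0]

def get_int32(f):
    return struct.unpack('<i', f.read(4))[0]

work_dict = {
    '_01_Format': {'value': 0, 'b_unpack_fn': get_int16},
    '_02_W_Size': {'value': 0, 'b_unpack_fn': get_int32},
}

stream = BytesIO(struct.pack('<hi', 1, 512))  # fake header bytes
for field in work_dict.values():
    field['value'] = field['b_unpack_fn'](stream)  # _work_dict['Field']['b_unpack_fn'](f)
print(work_dict['_01_Format']['value'], work_dict['_02_W_Size']['value'])  # 1 512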
|
buhe/judge
|
executors/ruby.py
|
Python
|
agpl-3.0
| 393
| 0.005089
|
from .base_executor import ScriptExecutor
from judgeenv import env
class RubyExecutor(ScriptExecutor):
ext = '.rb'
name = 'RUBY'
address_grace = 65536
fs = ['.*\.(?:so|rb$)', '/etc/localtime$', '/dev/urandom$', '/proc/self', '/usr/lib/ruby/gems/']
test_program = 'puts gets'
    @classmethod
    def get_command(cls):
return env['runtime'].get(cls.name.lower())
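The RubyExecutor above is configured entirely through class attributes, with get_command resolving the interpreter path from the judge environment by lowercased name. A hypothetical executor for another language would follow the same shape; the PERL key, filesystem whitelist, and test program below are assumptions, not part of this repository.

from .base_executor import ScriptExecutor
from judgeenv import env


class PerlExecutor(ScriptExecutor):
    ext = '.pl'
    name = 'PERL'
    address_grace = 65536
    fs = ['.*\.(?:so|pl$)', '/etc/localtime$', '/dev/urandom$']
    test_program = 'print scalar <STDIN>'

    @classmethod
    def get_command(cls):
        # Same lookup as RubyExecutor: env['runtime'] maps the lowercased
        # executor name to the interpreter path.
        return env['runtime'].get(cls.name.lower())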
|
nachandr/cfme_tests
|
cfme/containers/service.py
|
Python
|
gpl-2.0
| 4,417
| 0.002264
|
import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from cfme.common import Taggable
from cfme.common import TagPageView
from cfme.containers.provider import ContainerObjectAllBaseView
from cfme.containers.provider import ContainerObjectDetailsBaseView
from cfme.containers.provider import GetRandomInstancesMixin
from cfme.containers.provider import Labelable
from cfme.containers.provider import LoggingableView
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.providers import get_crud_by_name
class ServiceView(ContainerObjectAllBaseView, LoggingableView):
"""Container Nodes view"""
@property
def in_service(self):
"""Determine if the Service page is currently open"""
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Containers', 'Container Services']
)
class ServiceAllView(ServiceView):
"""Container Services All view"""
SUMMARY_TEXT = "Container Services"
@property
def is_displayed(self):
return self.in_service and super().is_displayed
class ServiceDetailsView(ContainerObjectDetailsBaseView):
"""Container Services Details view"""
SUMMARY_TEXT = "Container Services"
@attr.s
class Service(BaseEntity, Taggable, Labelable):
PLURAL = 'Container Services'
all_view = ServiceAllView
    details_view = ServiceDetailsView
name = attr.ib()
project_name = attr.ib()
provider = attr.ib()
@attr.s
class ServiceCollection(GetRandomInstancesMixin, BaseCollection):
"""Collection object for :py:class:`Service`."""
ENTITY = Service
def all(self):
        # container_services table has ems_id, join with ext_management_systems on id for provider name
        # Then join with container_projects on the id for the project
        service_table = self.appliance.db.client['container_services']
ems_table = self.appliance.db.client['ext_management_systems']
project_table = self.appliance.db.client['container_projects']
service_query = (
self.appliance.db.client.session
.query(service_table.name, project_table.name, ems_table.name)
.join(ems_table, service_table.ems_id == ems_table.id)
.join(project_table, service_table.container_project_id == project_table.id))
provider = None
# filtered
if self.filters.get('provider'):
provider = self.filters.get('provider')
service_query = service_query.filter(ems_table.name == provider.name)
services = []
for name, project_name, ems_name in service_query.all():
services.append(self.instantiate(name=name, project_name=project_name,
provider=provider or get_crud_by_name(ems_name)))
return services
@navigator.register(ServiceCollection, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
VIEW = ServiceAllView
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Compute', 'Containers', 'Container Services')
def resetter(self, *args, **kwargs):
# Reset view and selection
self.view.toolbar.view_selector.select("List View")
self.view.paginator.reset_selection()
@navigator.register(Service, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToAttribute('parent', 'All')
VIEW = ServiceDetailsView
def step(self, *args, **kwargs):
search_visible = self.prerequisite_view.entities.search.is_displayed
self.prerequisite_view.entities.get_entity(name=self.obj.name,
project_name=self.obj.project_name,
surf_pages=not search_visible,
use_search=search_visible).click()
@navigator.register(Service, 'EditTags')
class EditTags(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
|
buske/variant-subscription-service
|
vss/scripts/import.py
|
Python
|
gpl-3.0
| 2,871
| 0.002438
|
import sys
import gzip
import logging
from csv import DictReader
from datetime import datetime
from . import app, connect_db
from ..constants import DEFAULT_GENOME_BUILD, BENIGN, UNCERTAIN, UNKNOWN, PATHOGENIC
from ..extensions import mongo
from ..backend import build_variant_doc, get_variant_category, update_variant_task, create_variant_task, run_variant_tasks
from ..services.notifier import UpdateNotifier
logging.basicConfig(format="%(levelname)s (%(name)s %(lineno)s): %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def iter_variants(filename):
with gzip.open(filename, 'rt') as ifp:
for row in DictReader(ifp, dialect='excel-tab'):
yield row
def did_variant_category_change(old_doc, new_doc):
old_category = get_variant_category(old_doc)
new_category = get_variant_category(new_doc)
return old_category != new_category
def iter_variant_updates(db, variants):
for variant in variants:
new_doc = build_variant_doc(DEFAULT_GENOME_BUILD, **variant)
doc_id = new_doc['_id']
old_doc = db.variants.find_one({ '_id': doc_id })
if did_variant_category_change(old_doc, new_doc):
            yield (old_doc, new_doc)
def main(clinvar_filename):
db = connect_db()
notifier = UpdateNotifier(db, app.config)
started_at = datetime.utcnow()
task_list = []
variant_iterator = iter_variants(clinvar_filename)
for i, (old_doc, new_doc) in enumerate(iter_variant_updates(db, variant_iterator)):
if i % 10000 == 0:
logger.debug('Processed {} variants'.format(i))
        if old_doc:
# Variant is already known, either:
# - someone subscribed before it was added to clinvar, or
# - it was already in clinvar, and we might have new annotations
task = update_variant_task(db, old_doc, new_doc)
else:
# Add clinvar annotations with empty subscriber data
task = create_variant_task(db, new_doc)
task_list.append(task)
results = run_variant_tasks(db, task_list, notifier=notifier)
logger.debug('Variants updated. Results: {}'.format(results))
db.updates.insert_one({
'started_at': started_at,
'finished_at': datetime.utcnow(),
'inserted_count': results['inserted'],
'modified_count': results['modified'],
'notified_count': results['notified'],
})
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='Update ClinVar data')
parser.add_argument('clinvar_filename', metavar='CLINVAR_ALLELES_TSV_GZ', type=str,
help='clinvar_alleles.single.b*.tsv.gz from github.com/macarthur-lab/clinvar pipeline')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args.clinvar_filename)
|
HiSPARC/sapphire
|
sapphire/simulations/ldf.py
|
Python
|
gpl-3.0
| 16,510
| 0
|
"""
Lateral distribution functions that can be used for simulating particle
densities and for fitting to data.
Example usage::
>>> import tables
>>> from sapphire import NkgLdfSimulation, ScienceParkCluster
>>> data = tables.open_file('/tmp/test_ldf_simulation.h5', 'w')
>>> cluster = ScienceParkCluster()
>>> sim = NkgLdfSimulation(max_core_distance=400, min_energy=1e15,
... max_energy=1e21, cluster=cluster,
... datafile=data, n=200)
>>> sim.run()
"""
import warnings
from numpy import arctan2, cos, log10, pi, random, sin, sqrt
from scipy.special import gamma
from ..utils import pbar, vector_length
from .detector import ErrorlessSimulation, HiSPARCSimulation
class BaseLdfSimulation(HiSPARCSimulation):
def __init__(self, max_core_distance, min_energy, max_energy, *args,
**kwargs):
"""Simulation initialization
:param max_core_distance: maximum distance of shower core to
center of cluster (in meters).
:param min_energy,max_energy: Minimum and maximum energy of the
shower (in eV).
"""
super(BaseLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = BaseLdf()
self.max_core_distance = max_core_distance
self.min_energy = min_energy
self.max_energy = max_energy
# The cluster is not moved, so detector positions can be stored.
for station in self.cluster.stations:
for detector in station.detectors:
detector.xy_coordinates = detector.get_xy_coordinates()
def generate_shower_parameters(self):
"""Generate shower parameters, i.e. core position
For the simple LDF only the core position is relevant. It
assumes the shower to come from the Zenith.
:return: dictionary with shower parameters: core_pos
(x, y-tuple).
"""
r = self.max_core_distance
giga = int(1e9)
for i in pbar(range(self.n), show=self.progress):
energy = self.generate_energy(self.min_energy, self.max_energy)
size = 10 ** (log10(energy) - 15 + 4.8)
shower_parameters = {'ext_timestamp': (giga + i) * giga,
'azimuth': self.generate_azimuth(),
'zenith': 0.,
'core_pos': self.generate_core_position(r),
'size': size,
'energy': energy}
yield shower_parameters
def simulate_detector_response(self, detector, shower_parameters):
"""Simulate detector response to a shower
Get the mips in a detector from the LDF.
:param detector: :class:`~sapphire.clusters.Detector` for which
the observables will be determined.
:param shower_parameters: dictionary with the shower parameters.
"""
n_detected = self.get_num_particles_in_detector(detector,
shower_parameters)
theta = shower_parameters['zenith']
if n_detected:
mips = self.simulate_detector_mips(n_detected, theta)
observables = {'n': mips}
else:
observables = {'n': 0.}
return observables
def get_num_particles_in_detector(self, detector, shower_parameters):
"""Get the number of particles in a detector
:param detector: :class:`~sapphire.clusters.Detector` for which
the number of particles will be determined.
:param shower_parameters: dictionary with the shower parameters.
"""
x, y = detector.xy_coordinates
core_x, core_y = shower_parameters['core_pos']
zenith = shower_parameters['zenith']
azimuth = shower_parameters['azimuth']
size = shower_parameters['size']
r = self.ldf.calculate_core_distance(x, y, core_x, core_y, zenith,
azimuth)
p_shower = self.ldf.calculate_ldf_value(r, n_electrons=size)
p_ground = p_shower * cos(zenith)
num_particles = self.simulate_particles_for_density(
p_ground * detector.get_area())
return num_particles
@staticmethod
def simulate_particles_for_density(p):
"""Get number of particles in detector given a particle density
:param p: particle density in number per detector area.
:return: random number from Poisson distribution.
"""
return random.poisson(p)
class BaseLdfSimulationWithoutErrors(ErrorlessSimulation, BaseLdfSimulation):
"""This simulation does not simulate errors/uncertainties
This should result in perfect particle counting for the detectors.
"""
@staticmethod
def simulate_particles_for_density(p):
"""Exact number"""
return p
class NkgLdfSimulation(BaseLdfSimulation):
"""Same as the BaseLdfSimulation but uses the NkgLdf as LDF"""
def __init__(self, *args, **kwargs):
super(NkgLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = NkgLdf()
class NkgLdfSimulationWithoutErrors(NkgLdfSimulation,
BaseLdfSimulationWithoutErrors):
"""Same as the NkgLdfSimulation but without error simulation"""
pass
class KascadeLdfSimulation(BaseLdfSimulation):
"""Same as the BaseLdfSimulation but uses the KascadeLdf as LDF"""
def __init__(self, *args, **kwargs):
super(KascadeLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = KascadeLdf()
class KascadeLdfSimulationWithoutErrors(KascadeLdfSimulation,
BaseLdfSimulationWithoutErrors):
"""Same as the KascadeLdfSimulation but without error simulation"""
pass
class EllipsLdfSimulation(BaseLdfSimulation):
"""Same as BaseLdfSimulation but uses the EllipsLdF as LDF"""
def __init__(self, *args, **kwargs):
super(EllipsLdfSimulation, self).__init__(*args, **kwargs)
self.ldf = EllipsLdf()
def generate_shower_parameters(self):
"""Generate shower parameters, i.e. core position
For the elliptic LDF both the core position and the zenith angle
are relevant.
:return: dictionary with shower parameters: core_pos
(x, y-tuple).
"""
r = self.max_core_distance
giga = int(1e9)
for i in pbar(range(self.n), show=self.progress):
energy = self.generate_energy(self.min_energy, self.max_energy)
size = 10 ** (log10(energy) - 15 + 4.8)
shower_parameters = {'ext_timestamp': (giga + i) * giga,
'azimuth': self.generate_azimuth(),
'zenith': self.generate_zenith(),
'core_pos': self.generate_core_position(r),
'size': size,
'energy': energy}
yield shower_parameters
def get_num_particles_in_detector(self, detector, shower_parameters):
"""Get the number o
|
f parti
|
cles in a detector
:param detector: :class:`~sapphire.clusters.Detector` for which
the number of particles will be determined.
:param shower_parameters: dictionary with the shower parameters.
"""
x, y = detector.xy_coordinates
core_x, core_y = shower_parameters['core_pos']
zenith = shower_parameters['zenith']
azimuth = shower_parameters['azimuth']
size = shower_parameters['size']
r, phi = self.ldf.calculate_core_distance_and_angle(x, y, core_x,
core_y)
p_ground = self.ldf.calculate_ldf_value(r, phi, size, zenith, azimuth)
num_particles = self.simulate_particles_for_density(
p_ground * detector.get_area())
return num_particles
class BaseLdf(object):
"""Base LDF class
No particles!
|
ashander/msprime
|
msprime/trees.py
|
Python
|
gpl-3.0
| 89,405
| 0.001455
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2017 University of Oxford
#
# This file is part of msprime.
#
# msprime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# msprime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with msprime. If not, see <http://www.gnu.org/licenses/>.
#
"""
Module responsible for generating and reading tree files.
"""
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import sys
import base64
try:
import numpy as np
_numpy_imported = True
except ImportError:
_numpy_imported = False
import _msprime
import msprime.drawing as drawing
import msprime.provenance as provenance
import msprime.tables as tables
from _msprime import NODE_IS_SAMPLE
NULL_NODE = -1
NULL_POPULATION = -1
NULL_MUTATION = -1
IS_PY2 = sys.version_info[0] < 3
def check_numpy():
if not _numpy_imported:
raise RuntimeError("numpy is required for this operation.")
CoalescenceRecord = collections.namedtuple(
"CoalescenceRecord",
["left", "right", "node", "children", "time", "population"])
# TODO this interface is rubbish. Should have much better printing options.
# TODO we should be use __slots__ here probably.
class SimpleContainer(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return repr(self.__dict__)
class Node(SimpleContainer):
"""
A :ref:`node <sec_node_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this node. Varies from 0 to
:attr:`.TreeSequence.num_nodes` - 1.
:vartype id: int
:ivar flags: The bitwise flags for this node.
:vartype flags: int
:ivar time: The birth time of the individual represented by this node.
    :vartype time: float
:ivar population: The integer ID of the population that this node was born in.
:vartype population: int
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this node.
:vartype metadata: bytes
"""
def __init__(
self, id_=None, flags=0, time=0, population=NULL_POPULATION, metadata=""):
self.id = id_
self.time = time
self.population = population
self.metadata = metadata
self.flags = flags
def is_sample(self):
"""
Returns True if this node is a sample. This value is derived from the
``flag`` variable.
:rtype: bool
"""
return self.flags & NODE_IS_SAMPLE
class Edge(SimpleContainer):
"""
An :ref:`edge <sec_edge_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar left: The left coordinate of this edge.
:vartype left: float
:ivar right: The right coordinate of this edge.
:vartype right: float
:ivar parent: The integer ID of the parent node for this edge.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype parent: int
:ivar child: The integer ID of the child node for this edge.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype child: int
"""
def __init__(self, left, right, parent, child):
self.left = left
self.right = right
self.parent = parent
self.child = child
def __repr__(self):
return "{{left={:.3f}, right={:.3f}, parent={}, child={}}}".format(
self.left, self.right, self.parent, self.child)
class Site(SimpleContainer):
"""
A :ref:`site <sec_site_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this site. Varies from 0 to
:attr:`.TreeSequence.num_sites` - 1.
:vartype id: int
:ivar position: The floating point location of this site in genome coordinates.
Ranges from 0 (inclusive) to :attr:`.TreeSequence.sequence_length`
(exclusive).
:vartype position: float
:ivar ancestral_state: The ancestral state at this site (i.e., the state
inherited by nodes, unless mutations occur).
:vartype ancestral_state: str
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this site.
:vartype metadata: bytes
:ivar mutations: The list of mutations at this site. Mutations
within a site are returned in the order they are specified in the
underlying :class:`.MutationTable`.
:vartype mutations: list[:class:`.Mutation`]
"""
def __init__(self, id_, position, ancestral_state, mutations, metadata):
self.id = id_
self.position = position
self.ancestral_state = ancestral_state
self.mutations = mutations
self.metadata = metadata
class Mutation(SimpleContainer):
"""
A :ref:`mutation <sec_mutation_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this mutation. Varies from 0 to
:attr:`.TreeSequence.num_mutations` - 1.
:vartype id: int
:ivar site: The integer ID of the site that this mutation occurs at. To obtain
further information about a site with a given ID use
:meth:`.TreeSequence.site`.
:vartype site: int
:ivar node: The integer ID of the first node that inherits this mutation.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype node: int
:ivar derived_state: The derived state for this mutation. This is the state
inherited by nodes in the subtree rooted at this mutation's node, unless
another mutation occurs.
:vartype derived_state: str
:ivar parent: The integer ID of this mutation's parent mutation. When multiple
mutations occur at a site along a path in the tree, mutations must
record the mutation that is immediately above them. If the mutation does
not have a parent, this is equal to the :const:`NULL_MUTATION` (-1).
To obtain further information about a mutation with a given ID, use
:meth:`.TreeSequence.mutation`.
:vartype parent: int
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this site.
    :vartype metadata: bytes
    """
    def __init__(self, id_, site, node, derived_state, parent, metadata):
self.id = id_
self.site = site
self.node = node
self.derived_state = derived_state
self.parent = parent
self.metadata = metadata
class Migration(SimpleContainer):
"""
A :ref:`migration <sec_migration_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar left: The left end of the genomic interval covered by this
migration (inclusive).
:vartype left: float
:ivar right: The right end of the genomic interval covered by this migration
(exclusive).
:vartype right: float
:ivar node: The integer ID of the node involved in this migration event.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype node: int
:ivar source: The source population ID.
:vartype source: int
:ivar dest: The destination population ID.
:vartyp
|
patrykomiotek/seo-monitor-api
|
app/db.py
|
Python
|
mit
| 524
| 0.003817
|
import pymongo
from flask import g
from flask import current_app as app
def get_db():
if not hasattr(g, 'conn'):
print(app.config)
g.conn = pymongo.MongoClient(
app.config['MONGODB_HOST'],
int(app.config['MONGODB_PORT'])
)
if not hasattr(g, 'db'):
g.db = g.conn[app.config['MONGODB_DB']]
return g.db
# todo
# @app.teardown_appcontext
# def teardown_db(exception):
# conn = getattr(g, 'conn', None)
#     if conn is not None:
# conn.close()
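The commented-out block above sketches the missing cleanup. Wiring it up requires an application object, which this module does not create; a minimal sketch with an assumed create_app() factory might look like this:

from flask import Flask, g

def create_app():
    app = Flask(__name__)

    @app.teardown_appcontext
    def teardown_db(exception):
        # Close the MongoClient stored on g by get_db(), if one was opened.
        conn = getattr(g, 'conn', None)
        if conn is not None:
            conn.close()

    return app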
|