| column | dtype | values |
|---|---|---|
| repo_name | string | lengths 5-100 |
| path | string | lengths 4-231 |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6-947k |
| score | float64 | 0-0.34 |
| prefix | string | lengths 0-8.16k |
| middle | string | lengths 3-512 |
| suffix | string | lengths 0-8.17k |
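The prefix/middle/suffix columns read as a fill-in-the-middle style split: concatenating the three strings of a row restores the original source file. Below is a minimal sketch of that reconstruction, assuming only the column names above; the `reconstruct` helper and the inline `example_row` dict (copied from the Statement.after.py entry further down, with approximate whitespace) are illustrative and not part of any dataset tooling.

```python
# Minimal sketch (assumption): rebuild one source file from a dataset row
# by joining its prefix, middle and suffix text columns.
def reconstruct(row):
    """Concatenate the three text columns back into the original file."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Example row copied from the IllusionRom-deprecated entry below;
# only the split points matter here.
example_row = {
    "repo_name": "IllusionRom-deprecated/android_platform_tools_idea",
    "path": "python/testData/refactoring/extractmethod/Statement.after.py",
    "prefix": "def foo(a_new, b_new):\n    print(a_new + b_new * 123)\nde",
    "middle": "f f():\n    a = 1\n    b = 1\n    fo",
    "suffix": "o(a, b)\n",
}

print(reconstruct(example_row))
# def foo(a_new, b_new):
#     print(a_new + b_new * 123)
# def f():
#     a = 1
#     b = 1
#     foo(a, b)
```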
LouisLinY/mblog | mainsite/views.py | Python | apache-2.0 | size 1,203 | score 0.009975
# -*- coding:utf-8 -*-
import sys
from datetime import datetime
from django.template.loader import get_template
from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import Post
reload(sys)
sys.setdefaultencoding('utf-8')
# Create your views here.
def homepage(request):
template = get_template('index.html')
posts = Post.objects.all()
posts_lists = list()
now = datetime.now()
html = template.render(locals())
print sys.getdefaultencoding()
#for count, post in enumerate(posts):
# print post
# print post.pub_date
# print post.slug
# #posts_lists.append("NO.{}:".format(str(count)) + str(post) + "<br />")
# posts_lists.append("NO.{}:".format(str(count)) + str(post) + "<hr />")
# posts_lists.append("<small>" + str(post.body) + "</small><br /><br />")
return HttpResponse(html)
def showpost(request,slug):
template = get_template('post.html')
try:
post = Post.objects.get(slug = slug)
print post
if post != None:
html = template.render(locals())
return HttpResponse(html)
except:
return redirect('/homepage/')
blaze33/django | django/utils/encoding.py | Python | bsd-3-clause | size 9,166 | score 0.001855
from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
try:
from urllib.parse import quote
except ImportError: # Python 2
from urllib import quote
import warnings
from django.utils.functional import Promise
from django.utils import six
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class that derives __str__ from __unicode__.
On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
bytestring. On Python 3, __str__ returns the output of __unicode__.
Useful as a mix-in. If you support Python 2 and 3 with a single code base,
you can inherit this mix-in and just define __unicode__.
"""
def __init__(self, *args, **kwargs):
warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
"and apply the @python_2_unicode_compatible decorator "
"instead.", PendingDeprecationWarning, stacklevel=2)
super(StrAndUnicode, self).__init__(*args, **kwargs)
if six.PY3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if not six.PY3:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, six.integer_types + (type(None), float, Decimal,
datetime.datetime, datetime.date, datetime.time))
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% when s is an instance of
# six.text_type. This function gets called often in that setting.
if isinstance(s, six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, six.string_types):
if hasattr(s, '__unicode__'):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_text(arg, encoding, strings_only,
errors) for arg in s])
return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and (s is None or isinstance(s, int)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b' '.join([force_bytes(arg, encoding, strings_only,
errors) for arg in s])
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
if six.PY3:
smart_str = smart_text
force_str = force_text
else:
smart_str = smart_bytes
force_str = force_bytes
# backwards compatibility for Python 2
smart_unicode = smart_text
force_unicode = force_text
smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
"""Convert an file system path to a URI portion that is suitable for
inclusion in a URL.
W
IllusionRom-deprecated/android_platform_tools_idea | python/testData/refactoring/extractmethod/Statement.after.py | Python | apache-2.0 | size 98 | score 0.010204
def foo(a_new, b_new):
print(a_new + b_new * 123)
def f():
a = 1
b = 1
foo(a, b)
termie/openstack-dashboard | django-openstack/src/django_openstack/dash/urls.py | Python | apache-2.0 | size 1,070 | score 0.002804
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from django.conf.urls.defaults import *
from django.conf import settings
INSTANCES = r'^(?P<tenant_id>[^/]+)/instances/(?P<instance_id>[^/]+)/%s$'
IMAGES = r'^(?P<tenant_id>[^/]+)/images/(?P<image_id>[^/]+)/%s$'
KEYPAIRS = r'^(?P<tenant_id>[^/]+)/keypairs/%s$'
urlpatterns = patterns('django_openstack.dash.views.instances',
url(r'^(?P<tenant_id>[^/]+)/$', 'usage', name='dash_usage'),
url(r'^(?P<tenant_id>[^/]+)/instances/$', 'index', name='dash_instances'),
url(INSTANCES % 'console', 'console', name='dash_instances_console'),
url(INSTANCES % 'vnc', 'vnc', name='dash_instances_vnc'),
)
urlpatterns += patterns('django_openstack.dash.views.images',
url(r'^(?P<tenant_id>[^/]+)/images/$', 'index', name='dash_images'),
url(IMAGES % 'launch', 'launch', name='dash_images_launch'),
)
urlpatterns += patterns('django_openstack.dash.views.keypairs',
url(r'^(?P<tenant_id>[^/]+)/keypairs/$', 'index', name='dash_keypairs'),
url(KEYPAIRS % 'create', 'create', name='dash_keypairs_create'),
)
jsmits/django-logutils | django_logutils/middleware.py | Python | bsd-3-clause | size 4,098 | score 0
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import time
from django.db import connection
from django_logutils.conf import settings
logger = logging.getLogger(__name__)
def create_log_dict(request, response):
"""
Create a dictionary with logging data.
"""
remote_addr = request.META.get('REMOTE_ADDR')
if remote_addr in getattr(settings, 'INTERNAL_IPS', []):
remote_addr = request.META.get(
'HTTP_X_FORWARDED_FOR') or remote_addr
user_email = "-"
if hasattr(request, 'user'):
user_email = getattr(request.user, 'email', '-')
if response.streaming:
content_length = 'streaming'
else:
content_length = len(response.content)
return {
# 'event' makes event-based filtering possible in logging backends
# like logstash
'event': settings.LOGUTILS_LOGGING_MIDDLEWARE_EVENT,
'remote_address': remote_addr,
'user_email': user_email,
'method': request.method,
'url': request.get_full_path(),
'status': response.status_code,
'content_length': content_length,
'request_time': -1, # NA value: real value added by LoggingMiddleware
}
def create_log_message(log_dict, use_sql_info=False, fmt=True):
"""
Create the logging message string.
"""
log_msg = (
"%(remote_address)s %(user_email)s %(method)s %(url)s %(status)d "
"%(content_length)d (%(request_time).2f seconds)"
)
if use_sql_info:
sql_time = sum(
float(q['time']) for q in connection.queries) * 1000
extra_log = {
'nr_queries': len(connection.queries),
'sql_time': sql_time}
log_msg += " (%(nr_queries)d SQL queries, %(sql_time)f ms)"
log_dict.update(extra_log)
return log_msg % log_dict if fmt else log_msg
class LoggingMiddleware(object):
"""
Captures request info and logs it.
Logs all requests with log level info. If a request takes longer than
REQUEST_TIME_THRESHOLD, log level warning is used.
Logging middleware that captures the following:
* logging event.
* remote address (whether proxied or direct).
* if authenticated, then user email address.
* request method (GET/POST etc).
* request full path.
* response status code (200, 404 etc).
* content length.
* request process time.
* if DEBUG=True or REQUEST_TIME_THRESHOLD is exceeded, also logs SQL
query information - number of queries and how long they took.
Based on: https://djangosnippets.org/snippets/2624/
"""
def __init__(self, *args, **kwargs):
"""
Add initial empty start_time.
"""
self.start_time = None
def process_request(self, request):
"""
Add start time to request.
"""
self.start_time = time.time()
def process_response(self, request, response):
"""
Create the logging message.
"""
try:
log_dict = create_log_dict(request, response)
# add the request time to the log_dict; if no start time is
# available, use -1 as NA value
request_time = (
time.time() - self.start_time if hasattr(self, 'start_time')
and self.start_time else -1)
log_dict.update({'request_time': request_time})
is_request_time_too_high = (
request_time > float(settings.LOGUTILS_REQUEST_TIME_THRESHOLD))
use_sql_info = settings.DEBUG or is_request_time_too_high
log_msg = create_log_message(log_dict, use_sql_info, fmt=False)
if is_request_time_too_high:
logger.warning(log_msg, log_dict, extra=log_dict)
else:
logger.info(log_msg, log_dict, extra=log_dict)
except Exception as e:
logger.exception(e)
return response
fritzo/distributions | distributions/tests/test_models.py | Python | bsd-3-clause | size 20,478 | score 0
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import numpy.random
import scipy.stats
import functools
from collections import defaultdict
from nose import SkipTest
from nose.tools import assert_greater
from nose.tools import assert_in
from nose.tools import assert_is_instance
from nose.tools import assert_not_equal
from nose.tools import assert_true
from goftests import density_goodness_of_fit
from goftests import discrete_goodness_of_fit
from goftests import vector_density_goodness_of_fit
from distributions.dbg.random import sample_discrete
from distributions.util import scores_to_probs
from distributions.tests.util import assert_all_close
from distributions.tests.util import assert_close
from distributions.tests.util import assert_hasattr
from distributions.tests.util import import_model
from distributions.tests.util import list_models
from distributions.tests.util import seed_all
try:
import distributions.io.schema_pb2
has_protobuf = True
except ImportError:
has_protobuf = False
DATA_COUNT = 20
SAMPLE_COUNT = 1000
MIN_GOODNESS_OF_FIT = 1e-3
MODULES = {
'{flavor}.models.{name}'.format(**spec): import_model(spec)
for spec in list_models()
}
IS_FAST = {'dbg': False, 'hp': True, 'lp': True}
def model_is_fast(model):
flavor = model.__name__.split('.')[1]
return IS_FAST[flavor]
def iter_examples(module):
assert_hasattr(module, 'EXAMPLES')
EXAMPLES = module.EXAMPLES
assert_is_instance(EXAMPLES, list)
assert_true(EXAMPLES, 'no examples provided')
for i, EXAMPLE in enumerate(EXAMPLES):
print 'example {}/{}'.format(1 + i, len(EXAMPLES))
assert_in('shared', EXAMPLE)
assert_in('values', EXAMPLE)
values = EXAMPLE['values']
assert_is_instance(values, list)
count = len(values)
assert_true(
count >= 7,
'Add more example values (expected >= 7, found {})'.format(count))
yield EXAMPLE
def for_each_model(*filters):
'''
Run one test per Model, filtering out inappropriate Models for test.
'''
def filtered(test_fun):
@functools.wraps(test_fun)
def test_one_model(name):
module = MODULES[name]
assert_hasattr(module, 'Shared')
for EXAMPLE in iter_examples(module):
test_fun(module, EXAMPLE)
@functools.wraps(test_fun)
def test_all_models():
for name in MODULES:
module = MODULES[name]
if all(f(module) for f in filters):
yield test_one_model, name
return test_all_models
return filtered
@for_each_model()
def test_value(module, EXAMPLE):
assert_hasattr(module, 'Value')
assert_is_instance(module.Value, type)
values = EXAMPLE['values']
for value in values:
assert_is_instance(value, module.Value)
@for_each_model()
def test_shared(module, EXAMPLE):
assert_hasattr(module, 'Shared')
assert_is_instance(module.Shared, type)
shared1 = module.Shared.from_dict(EXAMPLE['shared'])
shared2 = module.Shared.from_dict(EXAMPLE['shared'])
assert_close(shared1.dump(), EXAMPLE['shared'])
values = EXAMPLE['values']
seed_all(0)
for value in values:
shared1.add_value(value)
seed_all(0)
for value in values:
shared2.add_value(value)
assert_close(shared1.dump(), shared2.dump())
for value in values:
shared1.remove_value(value)
assert_close(shared1.dump(), EXAMPLE['shared'])
@for_each_model()
def test_group(module, EXAMPLE):
assert_hasattr(module, 'Group')
assert_is_instance(module.Group, type)
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
for value in values:
shared.add_value(value)
group1 = module.Group()
group1.init(shared)
for value in values:
group1.add_value(shared, value)
group2 = module.Group.from_values(shared, values)
assert_close(group1.dump(), group2.dump())
group = module.Group.from_values(shared, values)
dumped = group.dump()
group.init(shared)
group.load(dumped)
assert_close(group.dump(), dumped)
for value in values:
group2.remove_value(shared, value)
assert_not_equal(group1, group2)
group2.merge(shared, group1)
for value in values:
group1.score_value(shared, value)
for _ in xrange(10):
value = group1.sample_value(shared)
group1.score_value(shared, value)
module.sample_group(shared, 10)
group1.score_data(shared)
group2.score_data(shared)
@for_each_model(lambda module: hasattr(module.Shared, 'protobuf_load'))
def test_protobuf(module, EXAMPLE):
if not has_protobuf:
raise SkipTest('protobuf not available')
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
Message = getattr(distributions.io.schema_pb2, module.NAME)
message = Message.Shared()
shared.protobuf_dump(message)
shared2 = module.Shared()
shared2.protobuf_load(message)
assert_close(shared2.dump(), shared.dump())
message.Clear()
dumped = shared.dump()
module.Shared.to_protobuf(dumped, message)
assert_close(module.Shared.from_protobuf(message), dumped)
if hasattr(module.Group, 'protobuf_load'):
for value in values:
shared.add_value(value)
group = module.Group.from_values(shared, values)
message = Message.Group()
group.protobuf_dump(message)
group2 = module.Group()
group2.protobuf_load(message)
assert_close(group2.dump(), group.dump())
message.Clear()
dumped = group.dump()
module.Group.to_protobuf(dumped, message)
assert_close(module.Group.from_protobuf(message), dumped)
@for_each_model()
def test_add_remove(module, EXAMPLE):
# Test group_add_value, group_remove_value, score_data, score_value
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
group = module.Group.from_values(shared)
score = 0.0
assert_close(group.score_data(shared), score, err_msg='p(empty) != 1')
for _ in range(DATA_COUNT):
value = group.sample_value(shared)
values.append(value)
score += group.score_value(shared, value)
group.add_value(shared, value)
group_all = module.Group.from_dict(group.dump())
assert_close(
score,
group.score_data(shared),
err_msg='p(x1,...,xn) != p(x1) p(x2|x1) p(xn|...)')
numpy.random.shuffle(values)
for value in values:
group.remove_value(shared, value)
group_empty = module.Grou
jawilson/home-assistant | tests/components/volumio/__init__.py | Python | apache-2.0 | size 41 | score 0
"""Tests for the Volumio integration.""
|
"
|
srgblnch/TangoDeviceWatchdog | tango-ds/dog.py | Python | gpl-3.0 | size 27,005 | score 0.000185
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Sergi Blanch-Torne"
__email__ = "sblanch@cells.es"
__copyright__ = "Copyright 2016 CELLS/Alba synchrotron"
__license__ = "GPLv3+"
__status__ = "development"
__all__ = ["Logger", "Dog", "WatchdogTester", "main"]
__docformat__ = 'restructuredtext'
try:
from fandango import Astor # soft dependency
except:
Astor = None
import email.mime.text
import smtplib
from socket import gethostname
from PyTango import DeviceProxy, DevState, EventType, DevFailed
from time import sleep, time
from threading import Thread, Event
import traceback
DEFAULT_RECHECK_TIME = 90.0 # seconds
DEFAULT_nOVERLAPS_ALERT = 10
DEFAULT_ASTOR_nSTOPS = 2
DEFAULT_ASTOR_STOPWAIT = 3 # seconds
SEPARATOR = "\\"
class Logger(object):
def __init__(self, parent, *args, **kwargs):
super(Logger, self).__init__(*args, **kwargs)
self._parent = parent
# --- tango streams
self.error_stream = parent.error_stream
self.warn_stream = parent.warn_stream
self.info_stream = parent.info_stream
self.debug_stream = parent.debug_stream
# --- tango event retransmission
self.fireEventsList = parent.fireEventsList
# --- running
self.isInRunningLst = parent.isInRunningLst
self.appendToRunning = parent.appendToRunning
self.removeFromRunning = parent.removeFromRunning
# --- fault
self.isInFaultLst = parent.isInFaultLst
self.appendToFault = parent.appendToFault
self.removeFromFault = parent.removeFromFault
# --- hang
self.isInHangLst = parent.isInHangLst
self.appendToHang = parent.appendToHang
self.removeFromHang = parent.removeFromHang
# --- mailto
self.mailto = parent.mailto
def fireEvent(self, attrName, value, timestamp=None, quality=None):
attrFullName = "%s%s%s"\
% (self.devName.replace("/", SEPARATOR), SEPARATOR, attrName)
try:
if timestamp and quality:
self.fireEventsList([[attrFullName, value, timestamp,
quality]])
else:
self.fireEventsList([[attrFullName, value]])
except Exception as e:
self.error_stream("Cannot fire event for %s/%s: %s"
% (self.devName, attrName, e))
traceback.print_exc()
class Dog(Logger):
def __init__(self, devName, joinerEvent=None, startDelay=None,
extraAttrs=None, *args, **kwargs):
super(Dog, self).__init__(*args, **kwargs)
self._devName = devName
self._devProxy = None
self._eventId = None
self._devState = None
# --- fault vbles
self._tryFaultRecovery = False
self._faultRecoveryCtr = 0
self._devStatus = None
# --- hangVbles
self._tryHangRecovery = False
self._hangRecoveryCtr = 0
# --- Thread for hang monitoring
self._joinerEvent = joinerEvent
self._thread = None
self._recheckPeriod = DEFAULT_RECHECK_TIME
self._overlaps = 0
self._overlapsAlert = DEFAULT_nOVERLAPS_ALERT
# --- extra attributes
self._extraAttributes = []
self._extraEventIds = {}
self._extraAttrValues = {}
for attrName in extraAttrs:
attrName = attrName.lower()
self._extraAttributes.append(attrName)
self._extraEventIds[attrName] = None
self._extraAttrValues[attrName] = None
# --- build proxy and event subscriptions
self.__buildProxy()
self.__createThread(startDelay)
def __str__(self):
return "Dog(%s, state=%s)" % (self.devName, self.devState)
def __repr__(self):
return "Dog(%s, state=%s, faultRecovery=%s, hangRecovery=%s)"\
% (self.devName, self.devState, self.tryFaultRecovery,
self.tryHangRecovery)
# --- object properties
@property
def devName(self):
return self._devName
@property
def devProxy(self):
return self._devProxy
@property
def devState(self):
return self._devState
def hasExtraAttr(self, attrName):
return self._extraAttributes.count(attrName.lower()) > 0
def getExtraAttr(self, attrName):
try:
value = self._devProxy[attrName].value
timestamp = self._devProxy[attrName].time.totime()
quality = self._devProxy[attrName].quality
if value != self._extraAttrValues[attrName]:
self.debug_stream("%s/%s has changed from %s to %s"
% (self.devName, attrName,
self._extraAttrValues[attrName], value))
self._extraAttrValues[attrName] = value
self.fireEvent(attrName, value, timestamp, quality)
if self.isInHangLst(self.devName):
self.removeFromHang(self.devName)
return value
except DevFailed as e:
if not self.isInHangLst(self.devName):
try:
self.devProxy.State()
except:
self.appendToHang(self.devName)
if e[0].reason in ['ATTRIBUTE_UNAVAILABLE',
'SOFTWARE_FAILURE']:
return
self.warn_stream("%s/%s read exception: %r %s"
% (self.devName, attrName,
e[0].reason, e[0].desc))
except Exception as e:
self.error_stream("%s/%s read exception: %s"
% (self.devName, attrName, e))
raise Exception("%s/%s cannot be read" % (self.devName, attrName))
def setExtraAttr(self, attrName, value):
try:
self.info_stream("Writing %s/%s with %s"
% (self.devName, attrName, str(value)))
self._devProxy[attrName] = value
if self.isInHangLst(self.devName):
self.removeFromHang(self.devName)
except DevFailed as e:
if not self.isInHangLst(self.devName):
try:
self.devProxy.State()
except:
self.appendToHang(self.devName)
self.warn_stream("%s/%s write exception: %r %s"
% (self.devName, attrName,
e[0].reason, e[0].desc))
except Exception as e:
self.error_stream("%s/%s write exception: %s"
% (self.devName, attrName, e))
raise Exception("%s/%s cannot be written" % (self.devName, attrName))
@property
def tryFaultRecovery(self):
return self._tryFaultRecovery
@tryFaultRecovery.setter
def tryFaultRecovery(self, value):
if type(value) == bool:
self._tryFaultRecovery = value
else:
self.error_stream("Only boolean assignment")
@property
def tryHangRecovery(self):
return self._tryHangRecovery
@tryHangRecovery.setter
def tryHangRecovery(self, value):
if type(value) == bool:
if value and not Astor:
self.error_stream("This feature is only available with "
"fandango's Astor present")
alexlo03/ansible | lib/ansible/modules/utilities/logic/async_wrapper.py | Python | gpl-3.0 | size 10,223 | score 0.002152
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
PY3 = sys.version_info[0] == 3
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
def notice(msg):
syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
# decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
os.setsid()
os.umask(int('022', 8))
# do second fork
try:
pid = os.fork()
if pid > 0:
# print "Daemon PID %d" % pid
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
dev_null = open('/dev/null', 'w')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
def _get_interpreter(module_path):
module_fd = open(module_path, 'rb')
try:
head = module_fd.read(1024)
if head[0:2] != '#!':
return None
return head[2:head.index('\n')].strip().split(' ')
finally:
module_fd.close()
def _run_module(wrapped_cmd, jid, job_path):
tmp_job_path = job_path + ".tmp"
jobfile = open(tmp_job_path, "w")
jobfile.write(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid}))
jobfile.close()
os.rename(tmp_job_path, job_path)
jobfile = open(tmp_job_path, "w")
result = {}
outdata = ''
filtered_outdata = ''
stderr = ''
try:
cmd = shlex.split(wrapped_cmd)
# call the module interpreter directly (for non-binary modules)
# this permits use of a script for an interpreter on non-Linux platforms
interpreter = _get_interpreter(cmd[0])
if interpreter:
cmd = interpreter + cmd
script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(outdata, stderr) = script.communicate()
if PY3:
outdata = outdata.decode('utf-8', 'surrogateescape')
stderr = stderr.decode('utf-8', 'surrogateescape')
(filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
result = json.loads(filtered_outdata)
if json_warnings:
# merge JSON junk warnings with any existing module warnings
module_warnings = result.get('warnings', [])
if not isinstance(module_warnings, list):
module_warnings = [module_warnings]
module_warnings.extend(json_warnings)
result['warnings'] = module_warnings
if stderr:
result['stderr'] = stderr
jobfile.write(json.dumps(result))
except (OSError, IOError):
e = sys.exc_info()[1]
result = {
"failed": 1,
"cmd": wrapped_cmd,
"msg": str(e),
"outdata": outdata, # temporary notice only
"stderr": stderr
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
except (ValueError, Exception):
result = {
"failed": 1,
"cmd": wrapped_cmd,
"data": outdata, # temporary notice only
"stderr": stderr,
"msg": traceback.format_exc()
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
jobfile.close()
os.rename(tmp_job_path, job_path)
if __name__ == '__main__':
if len(sys.argv) < 5:
print(json.dumps({
"failed": True,
"msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
"Humans, do not call directly!"
}))
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
if '-tmp-' not in os.path.dirname(wrapped_module):
preserve_tmp = True
elif len(sys.argv) > 5:
preserve_tmp = sys.argv[5] == '-preserve_tmp'
else:
preserve_tmp = False
# consider underscore as no argsfile so we can support passing of additional positional parameters
if argsfile != '_':
cmd = "%s %s" % (wrapped_module, argsfile)
else:
cmd = wrapped_module
step = 5
async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
# setup job output directory
jobdir = os.path.expanduser(async_dir)
job_path = os.path.join(jobdir, jid)
if not os.path.exists(jobdir):
try:
os.makedirs(jobdir)
except:
print(json.dumps({
"failed": 1,
"msg": "could not create: %s" % jobdir
}))
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
# we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
print(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
"_ansible_suppress_tmpdir_delete": not preserve_tmp}))
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
else:
# The actual wrapper process
# Daemonize,
flavour/eden | modules/s3db/hrm.py | Python | mit | size 471,286 | score 0.010189
# -*- coding: utf-8 -*-
""" Sahana Eden Human Resources Management
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("HRModel",
"HRSiteModel",
"HRSalaryModel",
"HRInsuranceModel",
#"HRJobModel",
"HRContractModel",
"HRSkillModel",
"HRTagModel",
"HRAppraisalModel",
"HRExperienceModel",
"HRAwardModel",
"HRDisciplinaryActionModel",
"HRProgrammeModel",
"HRShiftModel",
"HRDelegationModel",
"hrm_AssignMethod",
"hrm_competency_controller",
"hrm_compose",
"hrm_configure_pr_group_membership",
"hrm_credential_controller",
"hrm_CV",
"hrm_experience_controller",
"hrm_group_controller",
"hrm_human_resource_controller",
"hrm_human_resource_filters",
"hrm_HumanResourceRepresent",
"hrm_human_resource_onaccept",
"hrm_map_popup",
#"hrm_Medical",
"hrm_person_controller",
#"hrm_position_represent",
"hrm_Record",
"hrm_rheader",
"hrm_training_controller",
"hrm_training_event_controller",
"hrm_TrainingEventRepresent",
"hrm_xls_list_fields",
#"hrm_competency_list_layout",
#"hrm_credential_list_layout",
#"hrm_experience_list_layout",
#"hrm_training_list_layout",
)
import datetime
import json
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3PopupLink
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class HRModel(S3Model):
names = ("hrm_department",
"hrm_department_id",
"hrm_job_title",
"hrm_job_title_id",
"hrm_job_title_human_resource",
"hrm_human_resource",
"hrm_human_resource_id",
"hrm_type_opts",
"hrm_human_resource_represent",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
auth = current.auth
settings = current.deployment_settings
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
messages = current.messages
AUTOCOMPLETE_HELP = messages.AUTOCOMPLETE_HELP
#ORGANISATION = messages.ORGANISATION
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
organisation_id = self.org_organisation_id
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
mix_staff = settings.get_hrm_mix_staff()
request = current.request
controller = request.controller
group = request.get_vars.get("group", None)
if not group:
if mix_staff:
group = None
elif controller == "vol":
group = "volunteer"
elif controller == "deploy":
group = None
#elif controller in ("hrm", "org", "inv", "cr", "hms", "req"):
else:
group = "staff"
# =====================================================================
# Departments
#
tablename = "hrm_department"
define_table(tablename,
Field("name", notnull=True, length=64,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
# Only included in order to be able to set
# realm_entity to filter appropriately
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(label = T("Description"),
comment = None,
),
*s3_meta_fields())
label_create = T("Create Department")
crud_strings[tablename] = Storage(
label_create = label_create,
title_display = T("Department Details"),
title_list = T("Department Catalog"),
title_update = T("Edit Department"),
title_upload = T("Import Departments"),
label_list_button = T("List Departments"),
label_delete_button = T("Delete Department"),
msg_record_created = T("Department added"),
msg_record_modified = T("Department updated"),
msg_record_deleted = T("Department deleted"),
msg_list_empty = T("Currently no entries in the catalog"))
represent = S3Represent(lookup = tablename)
department_id = S3ReusableField("department_id", "reference %s" % tablename,
label = T("Department / Unit"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "hrm_department.id",
represent,
filterby = "organisation_id",
filter_opts = filter_opts,
)),
sortby = "name",
comment = S3PopupLink(c = "vol" if group == "volunteer" else "hrm",
f = "department",
label = label_create,
),
)
configure("hrm_department",
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
)
# =====================================================================
# Job Titles (Mayon: StaffResourceType)
#
STAFF = settings.get_hrm_staff_label()
if settings.has_module("vol"):
hrm_types = True
hrm_type_opts = {1: STAFF,
adw0rd/lettuce-py3 | tests/functional/test_terrain.py | Python | gpl-3.0 | size 2,378 | score 0.002103
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from os.path import dirname, abspath, join, curdir
from nose.tools import assert_equals, with_setup
from tests.asserts import prepare_stdout
def test_imports_terrain_under_path_that_is_run():
old_path = abspath(curdir)
os.chdir(join(abspath(dirname(__file__)), 'simple_features', '1st_feature_dir'))
status, output = subprocess.getstatusoutput('python -c "from lettuce import world;assert hasattr(world, \'works_fine\'); print \'it passed!\'"')
assert_equals(status, 0)
assert_equals(output, "it passed!")
os.chdir(old_path)
@with_setup(prepare_stdout)
def test_after_each_all_is_executed_before_each_all():
"terrain.before.each_all and terrain.after.each_all decorators"
from lettuce import step
from lettuce import Runner
from lettuce.terrain import before, after, world
world.all_steps = []
@before.all
def set_state_to_before():
world.all_steps.append('before')
@step('append 1 in world all steps')
def append_1_in_world_all_steps(step):
world.all_steps.append("1")
@step('append 2 more')
def append_2_more(step):
world.all_steps.append("2")
@step('append 3 in world all steps')
def append_during_to_all_steps(step):
world.all_steps.append("3")
@after.all
def set_state_to_after(total):
world.all_steps.append('after')
runner = Runner(join(abspath(dirname(__file__)), 'simple_features', '2nd_feature_dir'))
runner.run()
assert_equals(
world.all_steps,
['before', '1', '2', '3', 'after']
)
pelotoncycle/shared_memory_bloomfilter | setup.py | Python | gpl-3.0 | size 572 | score 0.012238
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
setup(name='peloton_bloomfilters',
author = 'Adam DePrince',
author_email = 'adam@pelotoncycle.com',
url = 'https://github.com/pelotoncycle/peloton_bloomfilters',
version='0.0.1',
description="Peloton Cycle's Bloomin fast Bloomfilters",
ext_modules=(
[
Extension(
name='peloton_bloomfilters',
sources=['peloton_bloomfiltersmodule.c']),
]
)
)
gmimano/commcaretest | corehq/apps/telerivet/forms.py | Python | bsd-3-clause | size 825 | score 0.002424
from django.forms.fields import *
from corehq.apps.sms.forms import BackendForm
from dimagi.utils.django.fields import TrimmedCharField
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
class TelerivetBackendForm(BackendForm):
api_key = TrimmedCharField()
project_id = TrimmedCharField()
phone_id = TrimmedCharField()
webhook_secret = TrimmedCharField()
def clean_webhook_secret(self):
# Circular import
from corehq.apps.telerivet.models import TelerivetBackend
value = self.cleaned_data.get("webhook_secret", None)
backend = TelerivetBackend.by_webhook_secret(value)
if backend is not None and backend._id != self._cchq_backend_id:
raise ValidationError(_("Already in use."))
return value
deepmind/Temporal-3D-Pose-Kinetics | third_party/hmr/renderer.py | Python | apache-2.0 | size 5,948 | score 0.009247
"""Render meshes using OpenDR.
Code is from:
https://github.com/akanazawa/hmr/blob/master/src/util/renderer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import cv2
import numpy as np
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
colors = {
# colorbline/print/copy safe:
'light_blue': [0.65098039, 0.74117647, 0.85882353],
'light_pink': [.9, .7, .7], # This is used to do no-3d
}
class SMPLRenderer(object):
"""Utility class to render SMPL models."""
def __init__(self, img_size=224, flength=500., face_path='smpl_faces.npy'):
self.faces = np.load(face_path)
self.w = img_size
self.h = img_size
self.flength = flength
def __call__(self,
verts,
cam=None,
img=None,
do_alpha=False,
far=None,
near=None,
color_id=0,
img_size=None):
# cam is 3D [f, px, py]
if img is not None:
h, w = img.shape[:2]
elif img_size is not None:
h = img_size[0]
w = img_size[1]
else:
h = self.h
w = self.w
if cam is None:
cam = [self.flength, w / 2., h / 2.]
use_cam = ProjectPoints(
f=cam[0] * np.ones(2),
rt=np.zeros(3),
t=np.zeros(3),
k=np.zeros(5),
c=cam[1:3])
if near is None:
near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
if far is None:
far = np.maximum(np.max(verts[:, 2]) + 25, 25)
imtmp = render_model(
verts,
self.faces,
w,
h,
use_cam,
do_alpha=do_alpha,
img=img,
far=far,
near=near,
color_id=color_id)
return (imtmp * 255).astype('uint8')
def rotated(self,
verts,
deg,
cam=None,
axis='y',
img=None,
do_alpha=True,
far=None,
near=None,
color_id=0,
img_size=None):
if axis == 'y':
around = cv2.Rodrigues(np.array([0, math.radians(deg), 0]))[0]
elif axis == 'x':
around = cv2.Rodrigues(np.array([math.radians(deg), 0, 0]))[0]
else:
around = cv2.Rodrigues(np.array([0, 0, math.radians(deg)]))[0]
center = verts.mean(axis=0)
new_v = np.dot((verts - center), around) + center
return self.__call__(
new_v,
cam,
img=img,
do_alpha=do_alpha,
far=far,
near=near,
img_size=img_size,
color_id=color_id)
def _create_renderer(w=640,
h=480,
rt=np.zeros(3),
t=np.zeros(3),
f=None,
c=None,
k=None,
near=.5,
far=10.):
f = np.array([w, w]) / 2. if f is None else f
c = np.array([w, h]) / 2. if c is None else c
k = np.zeros(5) if k is None else k
rn = ColoredRenderer()
rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
return rn
def _rotateY(points, angle):
"""Rotate the points by a specified angle."""
ry = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
[-np.sin(angle), 0., np.cos(angle)]])
return np.dot(points, ry)
def simple_renderer(rn,
verts,
faces,
yrot=np.radians(120),
color=colors['light_pink']):
# Rendered model color
rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
albedo = rn.vc
# Construct Back Light (on back right corner)
rn.vc = LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Left Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([800, 10, 300]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Right Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
vc=albedo,
light_color=np.array([.7, .7, .7]))
return rn.r
def get_alpha(imtmp, bgval=1.):
h, w = imtmp.shape[:2]
alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge(
(b_channel, g_channel, r_channel, alpha.astype(imtmp.dtype)))
return im_RGBA
def append_alpha(imtmp):
alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
if np.issubdtype(imtmp.dtype, np.uint8):
alpha = alpha * 255
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha))
return im_RGBA
def render_model(verts,
faces,
w,
h,
cam,
near=0.5,
far=25,
img=None,
do_alpha=False,
color_id=None):
rn = _create_renderer(
w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)
# Uses img as background, otherwise white background.
if img is not None:
rn.background_image = img / 255. if img.max() > 1 else img
if color_id is None:
color = colors['light_blue']
else:
color_list = colors.values()
color = color_list[color_id % len(color_list)]
imtmp = simple_renderer(rn, verts, faces, color=color)
# If white bg, make transparent.
if img is None and do_alpha:
imtmp = get_alpha(imtmp)
elif img is not None and do_alpha:
imtmp = append_alpha(imtmp)
return imtmp
minidron/django-geoaddress | django_geoaddress/models.py | Python | gpl-2.0 | size 3,479 | score 0
# --coding: utf8--
import requests
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
class Country(models.Model):
"""
Country model.
"""
title = models.CharField(
u'название', max_length=255)
class Meta:
verbose_name = u'страна'
verbose_name_plural = u'страны'
ordering = ['title']
def __unicode__(self):
return self.title
class BaseAddress(models.Model):
"""
Base address class with GEO data.
"""
country = models.ForeignKey(
Country,
verbose_name=u'страна')
area = models.CharField(
u'область', max_length=255, blank=True)
subarea = models.CharField(
u'район', max_length=255, blank=True)
locality = models.CharField(
u'населенный пункт', max_length=255)
street = models.CharField(
u'улица', max_length=255, blank=True)
house = models.CharField(
u'дом', max_length=50, blank=True)
apartment = models.CharField(
u'офис', max_length=10, blank=True)
zip = models.CharField(
u'почтовый индекс', max_length=10, blank=True)
coordinates = models.PointField(
u'координаты', blank=True, null=True)  # latitude, longitude
# Use GeoManager so that GEO queries can be made
objects = models.GeoManager()
class Meta:
verbose_name = u'адрес'
verbose_name_plural = u'адреса'
def __unicode__(self):
return ', '.join(part for part in [self.zip, self.country.title,
self.area, self.subarea,
self.locality, self.street,
self.house] if part)
def fetch_coordinates(self):
"""
Fetch the object's coordinates from Yandex.
"""
query = ',+'.join(
part for part in [self.country.title, self.area, self.subarea,
self.locality, self.street, self.house] if part)
url = u'http://geocode-maps.yandex.ru/1.x/?geocode=%s&format=json' % (
query)
try:
r = requests.get(url).json()
except requests.exceptions.RequestException:
return None
try:
longitude, latitude = (r['response']['GeoObjectCollection']
['featureMember'][0]['GeoObject']['Point']
['pos']).split(' ')
return GEOSGeometry(U'POINT(%s %s)' % (longitude, latitude))
except (KeyError, IndexError):
return None
def get_short_address(self):
return ', '.join(part for part in [self.area, self.locality] if part)
class Region(models.Model):
"""
Class for a geographic region.
"""
name = models.CharField(u'название', max_length=255)
coordinates = models.PolygonField(u'координаты')
# Use GeoManager so that GEO queries can be made
objects = models.GeoManager()
class Meta:
verbose_name = u'регион'
verbose_name_plural = u'регионы'
ordering = ['name']
def __unicode__(self):
return self.name
asgeirrr/word_cloud | setup.py | Python | mit | size 469 | score 0
from distutils.core import setup
from distutils.extension import Extension
setup(
name='wordcloud',
version='1.1.3',
url='https://github.com/amueller/word_cloud',
description='A little word cloud generator',
license='MIT',
ext_modules=[Extension("wo
|
rdcloud.query_integral_image",
["wordcloud/query_integral_image.c"])],
packages=['wordcloud'],
package_data={'wordcloud': ['stopwords', 'DroidSansMono.ttf']}
)
liyongsea/DIGITS | scripts/test_generate_docs.py | Python | bsd-3-clause | size 2,056 | score 0.005837
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import sys
import tempfile
import itertools
import unittest
try:
import flask.ext.autodoc
except ImportError as e:
raise unittest.SkipTest('Flask-Autodoc not installed')
try:
import digits
except ImportError:
# Add path for DIGITS package
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import digits.config; digits.config.load_config()
from digits.webapp import app, _doc as doc
from . import generate_docs as _
def check_doc_file(generator, doc_filename):
"""
Checks that the output generated by generator matches the contents of doc_filename
"""
with tempfile.NamedTemporaryFile(suffix='.md') as tmp_file:
generator.generate(tmp_file.name)
tmp_file.seek(0)
with open(doc_filename) as doc_file:
# memory friendly
for doc_line, tmp_line in itertools.izip(doc_file, tmp_file):
doc_line = doc_line.strip()
tmp_line = tmp_line.strip()
if doc_line.startswith('*Generated') and \
tmp_line.startswith('*Generated'):
# If the date is different, that's not a problem
pass
elif doc_line != tmp_line:
print '(Previous)', doc_line
print '(New) ', tmp_line
raise RuntimeError('%s needs to be regenerated. Use scripts/generate_docs.py' % doc_filename)
def test_api_md():
"""API.md out-of-date"""
with app.app_context():
generator = _.ApiDocGenerator(doc)
check_doc_file(generator,
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'docs', 'API.md'))
def test_flask_routes_md():
"""FlaskRoutes.md out-of-date"""
with app.app_context():
generator = _.FlaskRoutesDocGenerator(doc)
check_doc_file(generator,
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'docs', 'FlaskRoutes.md'))
LyanJin/J_lyan | New.py | Python | epl-1.0 | size 44 | score 0.022727
# -*- coding:utf8 -*-
a = 3
b = 4
print a+b
ffu/DSA-3.2.2 | gr-wxgui/src/python/forms/converters.py | Python | gpl-3.0 | size 5,122 | score 0.029871
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import eng_notation
import math
class abstract_converter(object):
def external_to_internal(self, v):
"""
Convert from user specified value to value acceptable to underlying primitive.
The underlying primitive usually expects strings.
"""
raise NotImplementedError
def internal_to_external(self, s):
"""
Convert from underlying primitive value to user specified value.
The underlying primitive usually expects strings.
"""
raise NotImplementedError
def help(self):
return "Any string is acceptable"
class identity_converter(abstract_converter):
def external_to_internal(self,v):
return v
def internal_to_external(self, s):
return s
########################################################################
# Commonly used converters
########################################################################
class chooser_converter(abstract_converter):
"""
Convert between a set of possible choices and an index.
Used in the chooser base and all sub-classes.
"""
def __init__(self, choices):
#choices must be a list because tuple does not have .index() in python2.5
self._choices = list(choices)
def external_to_internal(self, choice):
return self._choices.index(choice)
def internal_to_external(self, index):
return self._choices[index]
def help(self):
return 'Enter a possible value in choices: "%s"'%str(self._choices)
class bool_converter(abstract_converter):
"""
The internal representation is boolean.
The external representation is specified.
Used in the check box form.
"""
def __init__(self, true, false):
self._true = true
self._false = false
def external_to_internal(self, v):
return bool(v)
def internal_to_external(self, v):
if v: return self._true
else: return self._false
def help(self):
return "Value must be cast-able to type bool."
class eval_converter(abstract_converter):
"""
A catchall converter when int and float are not enough.
Evaluate the internal representation with python's eval().
Possible uses, set a complex number, constellation points.
Used in text box.
"""
def __init__(self, formatter=lambda x: '%s'%(x)):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return eval(s)
def help(self):
return "Value must be evaluatable by python's eval."
class str_converter(abstract_converter):
def __init__(self, formatter=lambda x: '%s'%(x)):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return str(s)
class int_converter(abstract_converter):
def __init__(self, formatter=lambda x: '%d'%round(x)):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return int(s, 0)
def help(self):
return "Enter an integer. Leading 0x indicates hex"
class float_converter(abstract_converter):
def __init__(self, formatter=eng_notation.num_to_str):
self._formatter = formatter
def external_to_internal(self, v):
return self._formatter(v)
def internal_to_external(self, s):
return eng_notation.str_to_num(s)
def help(self):
return "Enter a float with optional scale suffix. E.g., 100.1M"
class slider_converter(abstract_converter):
"""
Scale values to and from the slider.
"""
def __init__(self, minimum, maximum, num_steps, cast):
assert minimum < maximum
assert num_steps > 0
self._offset = minimum
self._scaler = float(maximum - minimum)/num_steps
self._cast = cast
def external_to_internal(self, v):
#slider's internal representation is an integer
return int(round((v - self._offset)/self._scaler))
def internal_to_external(self, v):
return self._cast(v*self._scaler + self._offset)
def help(self):
return "Value should be within slider range"
class log_slider_converter(slider_converter):
def __init__(self, min_exp, max_exp, num_steps, base):
assert min_exp < max_exp
assert num_steps > 0
self._base = base
slider_converter.__init__(self, minimum=min_exp, maximum=max_exp, num_steps=num_steps, cast=float)
def external_to_internal(self, v):
return slider_converter.external_to_internal(self, math.log(v, self._base))
def internal_to_external(self, v):
return self._base**slider_converter.internal_to_external(self, v)
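# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original GRC module): round-tripping a
# few values through the converters defined above. The choices, range and
# step count below are invented purely for illustration.
if __name__ == '__main__':
    chooser = chooser_converter(['32k', '44.1k', '48k'])
    assert chooser.external_to_internal('44.1k') == 1      # choice -> index
    assert chooser.internal_to_external(1) == '44.1k'      # index -> choice
    integer = int_converter()
    assert integer.internal_to_external('0x10') == 16      # leading 0x parsed as hex
    slider = slider_converter(minimum=0, maximum=10, num_steps=100, cast=float)
    assert slider.external_to_internal(5) == 50            # user value -> slider step
    assert slider.internal_to_external(50) == 5.0          # slider step -> user value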
|
Azure/azure-sdk-for-python
|
sdk/eventhub/azure-eventhub/azure/eventhub/aio/_eventprocessor/event_processor.py
|
Python
|
mit
| 18,174
| 0.002531
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import random
from typing import (
Dict,
Callable,
Any,
Union,
List,
TYPE_CHECKING,
Optional,
Iterable,
Awaitable,
cast,
)
import uuid
import asyncio
import logging
from functools import partial
from ..._common import EventData
from ..._eventprocessor.common import CloseReason, LoadBalancingStrategy
from ..._eventprocessor._eventprocessor_mixin import EventProcessorMixin
from ..._utils import get_event_links
from .partition_context import PartitionContext
from .in_memory_checkpoint_store import InMemoryCheckpointStore
from .checkpoint_store import CheckpointStore
from ._ownership_manager import OwnershipManager
from .utils import get_running_loop
from .._async_utils import get_dict_with_loop_if_needed
if TYPE_CHECKING:
from datetime import datetime
from .._consumer_async import EventHubConsumer
from .._consumer_client_async import EventHubConsumerClient
_LOGGER = logging.getLogger(__name__)
class EventProcessor(
EventProcessorMixin
): # pylint:disable=too-many-instance-attributes
"""
An EventProcessor constantly receives events from one or multiple partitions of the Event Hub
in the context of a given consumer group.
"""
def __init__(
self,
eventhub_client: "EventHubConsumerClient",
consumer_group: str,
event_handler: Callable[[PartitionContext, Union[Optional[EventData], List[EventData]]], Awaitable[None]],
*,
batch: Optional[bool] = False,
max_batch_size: Optional[int] = 300,
max_wait_time: Optional[float] = None,
partition_id: Optional[str] = None,
checkpoint_store: Optional[CheckpointStore] = None,
initial_event_position: Union[str, int, "datetime", Dict[str, Any]] = "@latest",
initial_event_position_inclusive: Union[bool, Dict[str, bool]] = False,
load_balancing_interval: float = 10.0,
partition_ownership_expiration_interval: Optional[float] = None,
load_balancing_strategy: LoadBalancingStrategy = LoadBalancingStrategy.GREEDY,
owner_level: Optional[int] = None,
prefetch: Optional[int] = None,
track_last_enqueued_event_properties: bool = False,
error_handler: Optional[
Callable[[PartitionContext, Exception], Awaitable[None]]
] = None,
partition_initialize_handler: Optional[
Callable[[PartitionContext], Awaitable[None]]
] = None,
partition_close_handler: Optional[
            Callable[[PartitionContext, CloseReason], Awaitable[None]]
] = None,
|
loop: Optional[asyncio.AbstractEventLoop] = None
):
self._consumer_group = consumer_group
self._eventhub_client = eventhub_client
self._namespace = (
eventhub_client._address.hostname # pylint: disable=protected-access
)
self._eventhub_name = eventhub_client.eventhub_name
self._partition_id = partition_id
self._event_handler = event_handler
self._batch = batch
self._max_batch_size = max_batch_size
self._max_wait_time = max_wait_time
self._error_handler = error_handler
self._partition_initialize_handler = partition_initialize_handler
self._partition_close_handler = partition_close_handler
self._checkpoint_store = checkpoint_store or InMemoryCheckpointStore()
self._initial_event_position = initial_event_position
self._initial_event_position_inclusive = initial_event_position_inclusive
self._load_balancing_interval = load_balancing_interval
self._ownership_timeout = partition_ownership_expiration_interval \
if partition_ownership_expiration_interval is not None \
else self._load_balancing_interval * 6
self._load_balancing_strategy = load_balancing_strategy or LoadBalancingStrategy.GREEDY
self._tasks = {} # type: Dict[str, asyncio.Task]
self._partition_contexts = {} # type: Dict[str, PartitionContext]
self._owner_level = owner_level
if checkpoint_store and self._owner_level is None:
self._owner_level = 0
self._prefetch = prefetch
self._track_last_enqueued_event_properties = (
track_last_enqueued_event_properties
)
self._id = str(uuid.uuid4())
self._internal_kwargs = get_dict_with_loop_if_needed(loop)
self._running = False
self._consumers = {} # type: Dict[str, EventHubConsumer]
self._ownership_manager = OwnershipManager(
cast("EventHubConsumerClient", self._eventhub_client),
self._consumer_group,
self._id,
self._checkpoint_store,
self._ownership_timeout,
self._load_balancing_strategy,
self._partition_id,
)
def __repr__(self) -> str:
return "EventProcessor: id {}".format(self._id)
async def _cancel_tasks_for_partitions(
self, to_cancel_partitions: Iterable[str]
) -> None:
_LOGGER.debug(
"EventProcessor %r tries to cancel partitions %r",
self._id,
to_cancel_partitions
)
for partition_id in to_cancel_partitions:
task = self._tasks.get(partition_id)
if task:
task.cancel()
_LOGGER.info(
"EventProcessor %r has cancelled partition %r",
self._id,
partition_id
)
if partition_id not in self._consumers: # task is cancelled before the consumer is created
del self._tasks[partition_id]
def _create_tasks_for_claimed_ownership(
self,
claimed_partitions: Iterable[str],
checkpoints: Optional[Dict[str, Dict[str, Any]]] = None,
) -> None:
_LOGGER.debug(
"EventProcessor %r tries to claim partition %r",
self._id,
claimed_partitions
)
for partition_id in claimed_partitions:
if partition_id not in self._tasks or self._tasks[partition_id].done():
checkpoint = checkpoints.get(partition_id) if checkpoints else None
if self._running:
self._tasks[partition_id] = get_running_loop().create_task(
self._receive(partition_id, checkpoint)
)
_LOGGER.info(
"EventProcessor %r has claimed partition %r",
self._id,
partition_id
)
async def _process_error(
self, partition_context: PartitionContext, err: Exception
) -> None:
if self._error_handler:
try:
await self._error_handler(partition_context, err)
except Exception as err_again: # pylint:disable=broad-except
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r. "
"An error occurred while running on_error. The exception is %r.",
self._id,
partition_context.eventhub_name,
partition_context.partition_id,
partition_context.consumer_group,
err_again,
)
async def _close_partition(
self, partition_context: PartitionContext, reason: CloseReason
) -> None:
_LOGGER.info(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" is being closed. Reason is: %r",
self._id,
partition_context.eventhub_name,
partition_context.partition_id,
partition_context.consumer_group,
reason,
)
|
Azure/azure-sdk-for-python
|
sdk/managementpartner/azure-mgmt-managementpartner/azure/mgmt/managementpartner/models/_models.py
|
Python
|
mit
| 7,226
| 0.00083
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Error(msrest.serialization.Model):
"""this is the management partner operations error.
:param error: this is the ExtendedErrorInfo property.
:type error: ~azure.mgmt.managementpartner.models.ExtendedErrorInfo
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ExtendedErrorInfo'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class ExtendedErrorInfo(msrest.serialization.Model):
"""this is the extended error info.
:param code: this is the error response code. Possible values include: "NotFound", "Conflict",
"BadRequest".
:type code: str or ~azure.mgmt.managementpartner.models.ErrorResponseCode
:param message: this is the extended error info message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ExtendedErrorInfo, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
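# Hedged usage sketch (not part of the generated file): nesting the two error
# models above and serializing them back to the wire format with msrest.
if __name__ == '__main__':
    _err = Error(error=ExtendedErrorInfo(code="NotFound", message="partner not found"))
    # serialize() follows the _attribute_map, so the output is roughly
    # {'error': {'code': 'NotFound', 'message': 'partner not found'}}
    print(_err.serialize())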
class OperationDisplay(msrest.serialization.Model):
"""this is the management partner operation.
    :param provider: this is the management partner provider.
    :type provider: str
    :param resource: this is the management partner resource.
    :type resource: str
    :param operation: this is the management partner operation.
    :type operation: str
    :param description: this is the management partner operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationList(msrest.serialization.Model):
"""this is the management partner operations list.
:param value: this is the operation response list.
:type value: list[~azure.mgmt.managementpartner.models.OperationResponse]
:param next_link: Url to get the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class OperationResponse(msrest.serialization.Model):
"""this is the management partner operations response.
:param name: this is the operation response name.
:type name: str
:param display: this is the operation display.
:type display: ~azure.mgmt.managementpartner.models.OperationDisplay
    :param origin: this is the operation response origin information.
:type origin: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationResponse, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
class PartnerResponse(msrest.serialization.Model):
"""this is the management partner operations response.
Variables are only populated by the server, and will be ignored when sending a request.
:param etag: Type of the partner.
:type etag: int
:ivar id: Identifier of the partner.
:vartype id: str
:ivar name: Name of the partner.
:vartype name: str
:ivar type: Type of resource. "Microsoft.ManagementPartner/partners".
:vartype type: str
:param partner_id: This is the partner id.
:type partner_id: str
:param partner_name: This is the partner name.
:type partner_name: str
:param tenant_id: This is the tenant id.
:type tenant_id: str
:param object_id: This is the object id.
:type object_id: str
:param version: This is the version.
:type version: int
:param updated_time: This is the DateTime when the partner was updated.
:type updated_time: ~datetime.datetime
:param created_time: This is the DateTime when the partner was created.
:type created_time: ~datetime.datetime
:param state: This is the partner state. Possible values include: "Active", "Deleted".
:type state: str or ~azure.mgmt.managementpartner.models.ManagementPartnerState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'etag': {'key': 'etag', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'partner_id': {'key': 'properties.partnerId', 'type': 'str'},
'partner_name': {'key': 'properties.partnerName', 'type': 'str'},
'tenant_id': {'key': 'properties.tenantId', 'type': 'str'},
'object_id': {'key': 'properties.objectId', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'int'},
'updated_time': {'key': 'properties.updatedTime', 'type': 'iso-8601'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PartnerResponse, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.id = None
self.name = None
self.type = None
self.partner_id = kwargs.get('partner_id', None)
self.partner_name = kwargs.get('partner_name', None)
self.tenant_id = kwargs.get('tenant_id', None)
self.object_id = kwargs.get('object_id', None)
self.version = kwargs.get('version', None)
self.updated_time = kwargs.get('updated_time', None)
self.created_time = kwargs.get('created_time', None)
self.state = kwargs.get('state', None)
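# Hedged deserialization sketch (not part of the generated file): the
# 'properties.partnerId' style keys in the _attribute_map above flatten the
# nested ARM payload, while id/name/type stay read-only. The response body
# below is invented for illustration.
if __name__ == '__main__':
    _body = {"etag": 3, "properties": {"partnerId": "123456", "partnerName": "Contoso",
                                       "state": "Active"}}
    _partner = PartnerResponse.deserialize(_body)
    print(_partner.etag, _partner.partner_id, _partner.state)  # 3 123456 Active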
|
SonienTaegi/CELLAR
|
Browser/views/__init__.py
|
Python
|
gpl-2.0
| 672
| 0.013393
|
from django.http.response import HttpResponse
from django.shortcuts import render_to_response, render
from Browser.models import UserInfo
|
from Browser.views import cellar, administrator
def simple_response(request, *args, **kwargs):
template_name = kwargs["path"]
if kwargs["type"] :
template_name = kwargs["type"] + "/" + template_name
userInfo = UserInfo.getUserInfo(request)
context = {
"isMetic" : userInfo.isMetic(),
"isYeoman" : userInfo.isYeoman(),
"isAdmin" : userInfo.isAdmin(),
"isSuper" : userInfo.isSuper()
}
return HttpResponse(render(request, template_name, context))
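# Hedged wiring sketch (not part of this module): simple_response reads "path"
# and "type" from its keyword arguments, so a urls.py entry could pass them as
# extra kwargs. The regex and template names below are invented for illustration.
#
#   from django.conf.urls import url
#   from Browser.views import simple_response
#
#   urlpatterns = [
#       url(r'^about/$', simple_response, kwargs={"path": "about.html", "type": "cellar"}),
#   ]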
|
brandsoulmates/incubator-airflow
|
airflow/operators/aws_emr_operator.py
|
Python
|
apache-2.0
| 5,997
| 0.001834
|
import logging
import shlex
import subprocess
import json
from airflow.hooks.aws_emr import EMRHook
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.exceptions import AirflowException
from slackclient import SlackClient
from time import sleep
import os
class AwsEMROperator(BaseOperator):
ui_color = '#00BFFF'
sc = None
@apply_defaults
def __init__(
self,
event_xcoms=None,
aws_emr_conn_id='aws_default',
xcom_push=True,
command_args=[[]],
channel="#airflow_notifications",
download_these_files=[],
start_cluster=False,
terminate_cluster=False,
xcom_task_id="job_runner",
dn_dir="./tmp",
username='airflow',
method='chat.postMessage',
icon_url='https://raw.githubusercontent.com/airbnb/airflow/master/airflow/www/static/pin_100.png',
*args,
**kwargs):
"""
Start by just invoking something.
"""
super(AwsEMROperator, self).__init__(*args, **kwargs)
self.channel = channel
self.username = username
self.icon_url = icon_url
self.download_these_files = download_these_files
self.conn_id = aws_emr_conn_id
self.method = 'chat.postMessage'
self.command_args = command_args
self.start_cluster = start_cluster
self.terminate_cluster = terminate_cluster
self.dn_dir = dn_dir
self.xcom_task_id = xcom_task_id
def slack_connect(self):
self.sc = SlackClient(self.token)
def slack_message(self, text):
self.token = os.environ["SLACK_API_TOKEN"]
if not self.sc:
self.slack_connect()
api_params = {
'channel': self.channel,
'username': self.username,
'text': text,
'icon_url': self.icon_url,
'link_names': 1
}
self.sc.api_call(self.method, **api_params)
def construct_command(self):
command = "aws emr create-cluster"
for key, value in self.command_args:
command = command + " " + key + " " + value
logging.info("Command is: " + command)
return shlex.split(command)
def exec_command(self, command):
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr != b'':
logging.info("Non zero exit code.")
logging.info(stderr)
raise AirflowException("The return code is non zero: " +
stderr)
print(stdout)
print(type(stdout))
try:
output = json.loads(stdout.replace("\n", ""))["ClusterId"]
except TypeError:
output = json.loads(stdout.decode("utf-8")\
.replace("\n",""))["ClusterId"]
|
logging.info("output_id: " + output)
return output
def execute(self, context):
s3_hook = S3Hook()
for bucket, key in self.download_these_files:
print(bucket)
print(key)
basename = os.path.basename(key)
            print(basename)
print(os.path.join(self.dn_dir, basename))
local_path = os.path.join(self.dn_dir, basename)
s3_hook.download_file(bucket, key, local_path)
job_monitor = EMRHook(emr_conn_id="S3_default")
if self.start_cluster:
output_id = self.exec_command(self.construct_command())
context['ti'].xcom_push(key="code", value=output_id)
if self.terminate_cluster:
output_id = context['ti'].xcom_pull(
task_id=self.xcom_task_id, key="code")
self.slack_message("""
@channel\n ----------------------------------------\nThe Cluster is being terminated for this job. \n ----------------------------------------\nProcess id = %s
""" % output_id)
job_monitor.terminate_job(output_id)
self.slack_message("""
@channel
The task Id of the new job is: %s
""" %
output_id)
while True:
if job_monitor.isRunning(output_id):
sleep(900)
elif job_monitor.isSuccessfull(output_id):
self.slack_message("""
@channel\n ----------------------------------------\nThe process is Successful.\n Manual check is always a good thing. \n ----------------------------------------\nProcess id = %s
""" % output_id)
break
elif job_monitor.isTerminated(output_id):
self.slack_message("""
@channel\n ----------------------------------------\nThe process has been terminated\n ----------------------------------------\nProcess id = %s
""" % output_id)
raise AirflowException("The process is terminated")
elif job_monitor.isWaiting(output_id):
self.slack_message("""
@channel\n ----------------------------------------\nThe process is WAITING\n ----------------------------------------\nProcess id = %s
""" % output_id)
raise AirflowException(
"Somebody needs to go see whats up. Spark Job is in Waiting State for id: %s" % output_id)
else:
sleep(300)
# def slack_message():
# token = os.environ["SLACK_API_TOKEN"]
# sc = SlackClient(token)
# api_params = {
# 'channel': "airflow_notifications",
# 'username': "airflow",
# 'text': "ssup @channel",
# 'icon_url': 'https://raw.githubusercontent.com/airbnb/airflow/master/airflow/www/static/pin_100.png',
# 'link_names': 1
# }
# sc.api_call("chat.postMessage", **api_params)
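# Hedged usage sketch (not part of this module): wiring the operator into a DAG.
# The DAG id, schedule and EMR arguments below are invented for illustration;
# command_args is a list of [flag, value] pairs consumed by construct_command().
if __name__ == '__main__':
    from datetime import datetime
    from airflow.models import DAG
    dag = DAG('emr_example', start_date=datetime(2017, 1, 1), schedule_interval=None)
    start_emr = AwsEMROperator(
        task_id='start_cluster',
        start_cluster=True,
        command_args=[['--name', 'etl-cluster'], ['--release-label', 'emr-5.3.0']],
        download_these_files=[('my-bucket', 'bootstrap/install.sh')],
        dag=dag,
    )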
|
chainer/chainercv
|
tests/links_tests/model_tests/deeplab_tests/test_aspp.py
|
Python
|
mit
| 975
| 0
|
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.deeplab import SeparableASPP
class TestSeparableASPP(unittest.TestCase):
def setUp(self):
|
self.in_channels = 128
self.out_channels = 32
self.link = SeparableASPP(
self.in_channels, self.out_channels)
def check_call(self):
xp = self.link.xp
x = chainer.Variable(xp.random.uniform(
low=-1, high=1, size=(2, self.in_channels, 64, 64)
).astype(xp.float32))
y = self.link(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, xp.ndarray)
|
self.assertEqual(y.shape, (2, self.out_channels, 64, 64))
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
testing.run_module(__name__, __file__)
|
grengojbo/st2
|
st2client/st2client/formatters/table.py
|
Python
|
apache-2.0
| 8,219
| 0.001095
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import logging
from prettytable import PrettyTable
from six.moves import zip
from st2client import formatters
from st2client.utils import strutil
from st2client.utils.terminal import get_terminal_size
LOG = logging.getLogger(__name__)
# Minimum width for the ID to make sure the ID column doesn't wrap across
# multiple lines
MIN_ID_COL_WIDTH = 26
DEFAULT_ATTRIBUTE_DISPLAY_ORDER = ['id', 'name', 'pack', 'description']
class MultiColumnTable(formatters.Formatter):
@classmethod
def format(cls, entries, *args, **kwargs):
attributes = kwargs.get('attributes', [])
attribute_transform_functions = kwargs.get('attribute_transform_functions', {})
widths = kwargs.get('widths', [])
widths = widths or []
if not widths and attributes:
# Dynamically calculate column size based on the terminal size
lines, cols = get_terminal_size()
if attributes[0] == 'id':
# consume iterator and save as entries so collection is accessible later.
entries = [e for e in entries]
# first column contains id, make sure it's not broken up
first_col_width = cls._get_required_column_width(values=[e.id for e in entries],
minimum_width=MIN_ID_COL_WIDTH)
cols = (cols - first_col_width)
col_width = int(math.floor((cols / len(attributes))))
else:
col_width = int(math.floor((cols / len(attributes))))
first_col_width = col_width
widths = []
for index in range(0, len(attributes)):
if index == 0:
widths.append(first_col_width)
else:
widths.append(col_width)
if not attributes or 'all' in attributes:
attributes = sorted([attr for attr in entries[0].__dict__
if not attr.startswith('_')])
# Determine table format.
if len(attributes) == len(widths):
# Customize width for each column.
columns = zip(attributes, widths)
else:
# If only 1 width value is provided then
# apply it to all columns else fix at 28.
width = widths[0] if len(widths) == 1 else 28
columns = zip(attributes,
[width for i in range(0, len(attributes))])
# Format result to table.
table = PrettyTable()
for column in columns:
table.field_names.append(column[0])
table.max_width[column[0]] = column[1]
table.padding_width = 1
table.align = 'l'
table.valign = 't'
for entry in entries:
# TODO: Improve getting values of nested dict.
values = []
for field_name in table.field_names:
if '.' in field_name:
field_names = field_name.split('.')
value = getattr(entry, field_names.pop(0), {})
for name in field_names:
value = cls._get_field_value(value, name)
if type(value) is str:
break
value = strutil.unescape(value)
values.append(value)
else:
value = cls._get_simple_field_value(entry, field_name)
transform_function = attribute_transform_functions.get(field_name,
lambda value: value)
value = transform_function(value=value)
value = strutil.unescape(value)
values.append(value)
table.add_row(values)
return table
@staticmethod
def _get_simple_field_value(entry, field_name):
"""
Format a value for a simple field.
"""
value = getattr(entry, field_name, '')
if isinstance(value, (list, tuple)):
if len(value) == 0:
value = ''
elif isinstance(value[0], (str, unicode)):
# List contains simple string values, format it as comma
# separated string
value = ', '.join(value)
return value
@staticmethod
def _get_field_value(value, field_name):
r_val = value.get(field_name, None)
if r_val is None:
return ''
if isinstance(r_val, list) or isinstance(r_val, dict):
return r_val if len(r_val) > 0 else ''
return r_val
@staticmethod
def _get_friendly_column_name(name):
if not name:
return None
friendly_name = name.replace('_', ' ').replace('.', ' ').capitalize()
return friendly_name
@staticmethod
def _get_required_column_width(values, minimum_width=0):
max_width = len(max(values, key=len)) if values else minimum_width
return max_width if max_width > minimum_width else minimum_width
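# Hedged usage sketch (not part of the original module): rendering a couple of
# simple objects with MultiColumnTable. The _Item class and its values are
# invented purely for illustration.
if __name__ == '__main__':
    class _Item(object):
        def __init__(self, id, name):
            self.id = id
            self.name = name
    _entries = [_Item('1', 'core.local'), _Item('2', 'core.remote')]
    print(MultiColumnTable.format(_entries, attributes=['id', 'name'], widths=[26, 28]))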
class PropertyValueTable(formatters.Formatter):
@classmethod
def format(cls, subject, *args, **kwargs):
attributes = kwargs.get('attributes', None)
attribute_display_order = kwargs.get('attribute_display_order',
DEFAULT_ATTRIBUTE_DISPLAY_ORDER)
attribute_transform_functions = kwargs.get('attribute_transform_functions', {})
if not attributes or 'all' in attributes:
attributes = sorted([attr for attr in subject.__dict__
if not attr.startswith('_')])
for attr in attribute_display_order[::-1]:
if attr in attributes:
attributes.remove(attr)
attributes = [attr] + attributes
table = PrettyTable()
table.field_names = ['Property', 'Value']
table.max_width['Property'] = 20
table.max_width['Value'] = 60
table.padding_width = 1
table.align = 'l'
table.valign = 't'
for attribute in attributes:
if '.' in attribute:
field_names = attribute.split('.')
value = cls._get_attribute_value(subject, field_names.pop(0))
for name in field_names:
value = cls._get_attribute_value(value, name)
if type(value) is str:
break
else:
value = cls._get_attribute_value(subject, attribute)
transform_function = attribute_transform_functions.get(attribute,
lambda value: value)
value = transform_function(value=value)
if type(value) is dict or type(value) is list:
value = json.dumps(value, indent=4)
value = strutil.unescape(value)
table.add_row([attribute, value])
return table
@staticmethod
    def _get_attribute_value(subject, attribute):
        if isinstance(subject, dict):
r_val = subject.get(attribute, None)
else:
r_val = getattr(subject, attribute, None)
if r_val is None:
return ''
if isinstance(r_val, list) or isinstance(r_val, dict):
return r_val if len(r
|
diofant/diofant
|
diofant/combinatorics/permutations.py
|
Python
|
bsd-3-clause
| 72,579
| 0.000096
|
import functools
import random
from collections import defaultdict
from mpmath.libmp.libintmath import ifac
from ..core import Basic, Tuple, sympify
from ..core.compatibility import as_int, is_sequence
from ..matrices import zeros
from ..polys import lcm
from ..utilities import flatten, has_dups, has_variety
from ..utilities.iterables import minlex, runs
def _af_rmul(a, b):
"""
Return the product b*a; input and output are array forms. The ith value
is a[b[i]].
Examples
========
>>> Permutation.print_cyclic = False
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
    >>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmuln
"""
return [a[i] for i in b]
def _af_rmuln(*abc):
"""
Given [a, b, c, ...] return the product of ...*c*b*a using array forms.
The ith value is a[b[c[i]]].
Examples
========
>>> Permutation.print_cyclic = False
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
|
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a)
>>> b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmul
"""
a = abc
m = len(a)
if m == 3:
p0, p1, p2 = a
return [p0[p1[i]] for i in p2]
if m == 4:
p0, p1, p2, p3 = a
return [p0[p1[p2[i]]] for i in p3]
if m == 5:
p0, p1, p2, p3, p4 = a
return [p0[p1[p2[p3[i]]]] for i in p4]
if m == 6:
p0, p1, p2, p3, p4, p5 = a
return [p0[p1[p2[p3[p4[i]]]]] for i in p5]
if m == 7:
p0, p1, p2, p3, p4, p5, p6 = a
return [p0[p1[p2[p3[p4[p5[i]]]]]] for i in p6]
if m == 8:
p0, p1, p2, p3, p4, p5, p6, p7 = a
return [p0[p1[p2[p3[p4[p5[p6[i]]]]]]] for i in p7]
if m == 1:
return a[0][:]
if m == 2:
a, b = a
return [a[i] for i in b]
if m == 0:
raise ValueError('String must not be empty')
p0 = _af_rmuln(*a[:m//2])
p1 = _af_rmuln(*a[m//2:])
return [p0[i] for i in p1]
def _af_parity(pi):
"""
Computes the parity of a permutation in array form.
The parity of a permutation reflects the parity of the
number of inversions in the permutation, i.e., the
number of pairs of x and y such that x > y but p[x] < p[y].
Examples
========
>>> _af_parity([0, 1, 2, 3])
0
>>> _af_parity([3, 2, 0, 1])
1
See Also
========
Permutation
"""
n = len(pi)
a = [0] * n
c = 0
for j in range(n):
if a[j] == 0:
c += 1
a[j] = 1
i = j
while pi[i] != j:
i = pi[i]
a[i] = 1
return (n - c) % 2
def _af_invert(a):
"""
Finds the inverse, ~A, of a permutation, A, given in array form.
Examples
========
>>> A = [1, 2, 0, 3]
>>> _af_invert(A)
[2, 0, 1, 3]
>>> _af_rmul(_, A)
[0, 1, 2, 3]
See Also
========
Permutation, __invert__
"""
inv_form = [0] * len(a)
for i, ai in enumerate(a):
inv_form[ai] = i
return inv_form
def _af_pow(a, n):
"""
Routine for finding powers of a permutation.
Examples
========
>>> Permutation.print_cyclic = False
>>> p = Permutation([2, 0, 3, 1])
>>> p.order()
4
>>> _af_pow(p._array_form, 4)
[0, 1, 2, 3]
"""
if n == 0:
return list(range(len(a)))
if n < 0:
return _af_pow(_af_invert(a), -n)
if n == 1:
return a[:]
elif n == 2:
b = [a[i] for i in a]
elif n == 3:
b = [a[a[i]] for i in a]
elif n == 4:
b = [a[a[a[i]]] for i in a]
else:
# use binary multiplication
b = list(range(len(a)))
while 1:
if n & 1:
b = [b[i] for i in a]
n -= 1
if not n:
break
if n % 4 == 0:
a = [a[a[a[i]]] for i in a]
n = n // 4
elif n % 2 == 0:
a = [a[i] for i in a]
n = n // 2
return b
def _af_commutes_with(a, b):
"""
Checks if the two permutations with array forms
given by ``a`` and ``b`` commute.
Examples
========
>>> _af_commutes_with([1, 2, 0], [0, 2, 1])
False
See Also
========
Permutation, commutes_with
"""
return not any(a[b[i]] != b[a[i]] for i in range(len(a) - 1))
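# Hedged consistency sketch (not part of the original module): the array-form
# helpers above compose as the doctests describe; the permutation below is the
# one used in the _af_pow doctest.
if __name__ == '__main__':
    _a = [2, 0, 3, 1]
    assert _af_rmul(_af_invert(_a), _a) == [0, 1, 2, 3]   # p**-1 * p == identity
    assert _af_pow(_a, 4) == [0, 1, 2, 3]                 # order of this permutation is 4
    assert _af_parity([3, 2, 0, 1]) == 1                  # odd permutation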
class Cycle(dict):
"""
Wrapper around dict which provides the functionality of a disjoint cycle.
A cycle shows the rule to use to move subsets of elements to obtain
a permutation. The Cycle class is more flexible than Permutation in
that 1) all elements need not be present in order to investigate how
multiple cycles act in sequence and 2) it can contain singletons:
A Cycle will automatically parse a cycle given as a tuple on the rhs:
>>> Cycle(1, 2)(2, 3)
Cycle(1, 3, 2)
The identity cycle, Cycle(), can be used to start a product:
>>> Cycle()(1, 2)(2, 3)
Cycle(1, 3, 2)
The array form of a Cycle can be obtained by calling the list
method (or passing it to the list function) and all elements from
0 will be shown:
>>> a = Cycle(1, 2)
>>> a.list()
[0, 2, 1]
>>> list(a)
[0, 2, 1]
If a larger (or smaller) range is desired use the list method and
provide the desired size -- but the Cycle cannot be truncated to
a size smaller than the largest element that is out of place:
>>> b = Cycle(2, 4)(1, 2)(3, 1, 4)(1, 3)
>>> b.list()
[0, 2, 1, 3, 4]
>>> b.list(b.size + 1)
[0, 2, 1, 3, 4, 5]
>>> b.list(-1)
[0, 2, 1]
Singletons are not shown when printing with one exception: the largest
element is always shown -- as a singleton if necessary:
>>> Cycle(1, 4, 10)(4, 5)
Cycle(1, 5, 4, 10)
>>> Cycle(1, 2)(4)(5)(10)
Cycle(1, 2)(10)
The array form can be used to instantiate a Permutation so other
properties of the permutation can be investigated:
>>> Perm(Cycle(1, 2)(3, 4).list()).transpositions()
[(1, 2), (3, 4)]
Notes
=====
The underlying structure of the Cycle is a dictionary and although
the __iter__ method has been redefined to give the array form of the
    cycle, the underlying dictionary items are still available with
    such methods as items():
>>> list(Cycle(1, 2).items())
[(1, 2), (2, 1)]
See Also
========
Permutation
"""
def __missing__(self, arg):
"""Enter arg into dictionary and return arg."""
arg = as_int(arg)
self[arg] = arg
return arg
def __iter__(self):
for i in self.list():
yield i
def __call__(self, *other):
"""Return product of cycles processed from R to L.
Examples
========
>>> Cycle(1, 2)(2, 3)
Cycle(1, 3, 2)
An instance of a Cycle will automatically parse list-like
objects and Permutations that are on the right. It is more
flexible than the Permutation in that all elements need not
be present:
>>> a = Cycle(1, 2)
>>> a(2, 3)
Cycle(1, 3, 2)
>>> a(2, 3)(4, 5)
Cycle(1, 3, 2)(4, 5)
"""
rv = Cycle(*other)
for k, v in zip(list(self.keys()), [rv[v] for v in self.values()]):
rv[k] = v
return rv
def list(self, size=None):
"""Return the cycles as an explicit list starting from 0 up
to the greater of the largest value in the cycles and size.
Truncation of trailing unmoved items will occur when size
is less than the maximum element in the cycle; if this is
desired, setting ``
|
Martin09/E-BeamPatterns
|
100 Wafers - 1cm Squares/Multi-Use Pattern/v1.2/MembraneDesign_100Wafer_v1.1.py
|
Python
|
gpl-3.0
| 17,018
| 0.002585
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 14:11:31 2015
@author: Martin Friedl
"""
from datetime import date
import numpy as np
from Patterns.GrowthTheoryCell import make_theory_cell
from Patterns.GrowthTheoryCell_100_3BranchDevices import make_theory_cell_3br
from Patterns.GrowthTheoryCell_100_4BranchDevices import make_theory_cell_4br
from gdsCAD_py3.core import Cell, Boundary, CellArray, Layout, Path
from gdsCAD_py3.shapes import Box, Rectangle, Label
from gdsCAD_py3.templates100 import Wafer_GridStyle, dashed_line
WAFER_ID = 'XXXX' # CHANGE THIS FOR EACH DIFFERENT WAFER
PATTERN = 'SQ1.2'
putOnWafer = True # Output full wafer or just a single pattern?
HighDensity = False # High density of triangles?
glbAlignmentMarks = False
tDicingMarks = 10. # Dicing mark line thickness (um)
rotAngle = 0. # Rotation angle of the membranes
wafer_r = 25e3
waferVer = '100 Membranes Multi-Use v1.2'.format(int(wafer_r / 1000))
waferLabel = waferVer + '\n' + date.today().strftime("%d%m%Y")
# Layers
l_smBeam = 0
l_lgBeam = 1
l_drawing = 100
# %% Wafer template for MBE growth
class MBE100Wafer(Wafer_GridStyle):
"""
A 2" wafer divided into square cells
"""
def __init__(self, name, cells=None):
Wafer_GridStyle.__init__(self, name=name, cells=cells, block_gap=1200.)
# The placement of the wafer alignment markers
am_x = 1.5e4
am_y = 1.5e4
self.align_pts = np.array([am_x, am_y])
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(-1, 1))) # Reflect about y-axis
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(1, -1))) # Reflect about x-axis
self.wafer_r = 25e3
self.block_size = np.array([10e3, 10e3])
self._place_blocks(radius=self.wafer_r + 5e3)
# if glbAlignmentMarks:
# self.add_aligment_marks(l_lgBeam)
# self.add_orientation_text(l_lgBeam)
# self.add_dicing_marks() # l_lgBeam, mkWidth=mkWidth Width of dicing marks
self.add_blocks()
self.add_wafer_outline(layers=l_drawing)
self.add_dashed_dicing_marks(layers=[l_lgBeam])
self.add_block_labels(layers=[l_lgBeam])
self.add_prealignment_markers(layers=[l_lgBeam])
self.add_tem_membranes([0.08, 0.012, 0.028, 0.044], 2000, 1, l_smBeam)
self.add_theory_cells()
self.add_chip_labels()
# self.add_blockLabels(l_lgBeam)
# self.add_cellLabels(l_lgBeam)
bottom = np.array([0, -self.wafer_r * 0.9])
# top = np.array([0, -1]) * bottom
self.add_waferLabel(waferLabel, l_drawing, pos=bottom)
def add_block_labels(self, layers):
txtSize = 800
for (i, pt) in enumerate(self.block_pts):
origin = (pt + np.array([0.5, 0.5])) * self.block_size
blk_lbl = self.blockcols[pt[0]] + self.blockrows[pt[1]]
for l in layers:
txt = Label(blk_lbl, txtSize, layer=l)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
lbl_cell = Cell("lbl_" + blk_lbl)
lbl_cell.add(txt)
origin += np.array([0, 0])
self.add(lbl_cell, origin=origin)
def add_dashed_dicing_marks(self, layers):
if type(layers) is not list:
layers = [layers]
width = 10. / 2
dashlength = 2000
r = self.wafer_r
rng = np.floor(self.wafer_r / self.block_size).astype(int)
dmarks = Cell('DIC_MRKS')
for l in layers:
for x in np.arange(-rng[0], rng[0] + 1) * self.block_size[0]:
y = np.sqrt(r ** 2 - x ** 2)
vm = dashed_line([x, y], [x, -y], dashlength, width, layer=l)
dmarks.add(vm)
for y in np.arange(-rng[1], rng[1] + 1) * self.block_size[1]:
x = np.sqrt(r ** 2 - y ** 2)
hm = dashed_line([x, y], [-x, y], dashlength, width, layer=l)
dmarks.add(hm)
self.add(dmarks)
def add_prealignment_markers(self, layers, mrkr_size=7):
if mrkr_size % 2 == 0: # Number is even, but we need odd numbers
mrkr_size += 1
if type(layers) is not list:
layers = [layers]
for l in layers:
rect_size = 10. # 10 um large PAMM rectangles
marker_rect = Rectangle([-rect_size / 2., -rect_size / 2.], [rect_size / 2., rect_size / 2.], layer=l)
marker = Cell('10umMarker')
marker.add(marker_rect)
# Make one arm of the PAMM array
marker_arm = Cell('PAMM_Arm')
# Define the positions of the markers, they increase in spacing by 1 um each time:
mrkr_positions = [75 * n + (n - 1) * n // 2 for n in range(1, (mrkr_size - 1) // 2 + 1)]
for pos in mrkr_positions:
marker_arm.add(marker, origin=[pos, 0])
# Build the final PAMM Marker
pamm_cell = Cell('PAMM_Marker')
pamm_cell.add(marker) # Center marker
pamm_cell.add(marker_arm) # Right arm
pamm_cell.add(marker_arm, rotation=180) # Left arm
pamm_cell.add(marker_arm, rotation=90) # Top arm
pamm_cell.add(marker_arm, rotation=-90) # Bottom arm
            for pos in mrkr_positions:
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=90) # Top arms
pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=90)
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=-90) # Bottom arms
                pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=-90)
# Make the 4 tick marks that mark the center of the array
h = 30.
w = 100.
tick_mrk = Rectangle([-w / 2., -h / 2.], [w / 2, h / 2.], layer=l)
tick_mrk_cell = Cell("TickMark")
tick_mrk_cell.add(tick_mrk)
pos = mrkr_positions[-1] + 75 + w / 2.
pamm_cell.add(tick_mrk_cell, origin=[pos, 0])
pamm_cell.add(tick_mrk_cell, origin=[-pos, 0])
pamm_cell.add(tick_mrk_cell, origin=[0, pos], rotation=90)
pamm_cell.add(tick_mrk_cell, origin=[0, -pos], rotation=90)
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(pamm_cell, origin=(center_x + 2000, center_y))
block.add(pamm_cell, origin=(center_x - 2000, center_y))
def add_tem_membranes(self, widths, length, pitch, layer):
tem_membranes = Cell('TEM_Membranes')
n = 5
curr_y = 0
for width in widths:
membrane = Path([(-length / 2., 0), (length / 2., 0)], width=width, layer=layer)
membrane_cell = Cell('Membrane_w{:.0f}'.format(width * 1000))
membrane_cell.add(membrane)
membrane_array = CellArray(membrane_cell, 1, n, (0, pitch))
membrane_array_cell = Cell('MembraneArray_w{:.0f}'.format(width * 1000))
membrane_array_cell.add(membrane_array)
tem_membranes.add(membrane_array_cell, origin=(0, curr_y))
curr_y += n * pitch
n2 = 3
tem_membranes2 = Cell('Many_TEM_Membranes')
tem_membranes2.add(CellArray(tem_membranes, 1, n2, (0, n * len(widths) * pitch)))
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(tem_membranes2, origin=(center_x, center_y + 2000))
def add_theory_cells(self):
theory_cells = Cell('TheoryCells')
theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-400, 0))
theory_cells.add(make_theory_cell_3br(), origin=(0, 0))
theory_cells.add(make_theory_cell_4br(), origin=(400, 0))
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(theory_cells, origin=(center_x, center_y - 2000))
def add_chip_labels(self):
wafer_lbl = PATTERN + '\n' + WAFER_ID
text = Label(wafer_lbl, 20., layer=l_lgBeam)
text.translate(tuple(np.array(-text.bound
|
jrmi/pypeman
|
pypeman/tests/settings/test_settings_sqlite_persist.py
|
Python
|
apache-2.0
| 164
| 0.006098
|
"""
Persistence configuration
"""
|
PERSISTENCE_BACKEND = 'pypeman.persistence.SqliteBackend'
PERSISTENCE_CONFIG = {"path":'/tmp/to_be_removed_849827198746.sqlite'}
|
blckshrk/Weboob
|
modules/minutes20/test.py
|
Python
|
agpl-3.0
| 982
| 0
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
__all__ = ['Minutes20Test']
class Minutes20Test(BackendTest):
BACKEND = 'minutes20'
def test_new_messages(self):
for message in self.backend.iter_unread_messages():
pass
|
andrewbird/wader
|
plugins/devices/zte_mf180.py
|
Python
|
gpl-2.0
| 1,826
| 0.001644
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Vodafone España, S.A.
# Author: Andrew Bird
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from wader.common.consts import WADER_CONNTYPE_USB
from core.hardware.zte import (ZTEWCDMADevicePlugin,
ZTEWCDMACustomizer,
ZTEWrapper)
class ZTEMF180Wrapper(ZTEWrapper):
def send_ussd(self, ussd):
"""Sends the ussd command ``ussd``"""
        # XXX: assumes it's the same as 637U
# MF180 wants request in ascii chars even though current
# set might be ucs2
return super(ZTEMF180Wrapper, self).send_ussd(ussd, force_ascii=True)
class ZTEMF180Customizer(ZTEWCDMACustomizer):
wrapper_klass = ZTEMF180Wrapper
class ZTEMF180(ZTEWCDMADevicePlugin):
""":class:`~core.plugin.DevicePlugin` for ZTE's MF180"""
name = "ZTE MF180"
version = "0.1"
author = u"Andrew Bird"
custom = ZTEMF180Customizer()
__remote_name__ = "MF180"
__properties__ = {
'ID_VENDOR_ID': [0x19d2],
'ID_MODEL_ID': [0x2003],
}
conntype = WADER_CONNTYPE_USB
zte_mf180 = ZTEMF180()
|
bh/python-ddns-zones-updater
|
tests/test_core.py
|
Python
|
gpl-2.0
| 1,816
| 0
|
from __future__ import absolute_import, unicode_literals
import mock
import pytest
from ddns_zones_updater.configreader import ConfigReader
from ddns_zones_updater.core import DDNSZoneUpdater
@pytest.fixture
def fake_config_reader_with_two_hosts():
host_1 = mock.Mock(do_update=mock.Mock())
host_2 = mock.Mock(do_update=mock.Mock())
class FakeHostManager(mock.Mock):
__iter__ = mock.Mock(return_value=(h for h in [host_1, host_2]))
class FakeConfigReader(mock.Mock):
hosts = FakeHostManager()
    return [host_1, host_2], FakeConfigReader()
@pytest.fixture
def updater_without_calling_init(request):
patcher = mock.patch.object(DDNSZoneUpdater, "__init__", return_value=None)
patcher.start()
request.addfinalizer(patcher.stop)
return DDNSZoneUpdater("path/to/config.ini")
@mock.patch.object(ConfigReader, "read")
@mock.patch.object(ConfigReader, "__init__", return_value=None)
def test_initializer(mock_init, mock_read):
DDNSZoneUpdater("/tmp/foo.ini")
mock_init.assert_called_once_with("/tmp/foo.ini")
mock_read.assert_called_once_with()
def test_get_current_wan_ip(updater_without_calling_init):
updater = updater_without_calling_init
with mock.patch("ipgetter.myip", return_value="149.0.0.31") as mock_my_ip:
assert updater.current_wan_ip() == "149.0.0.31"
mock_my_ip.assert_called_once_with()
def test_run(updater_without_calling_init, fake_config_reader_with_two_hosts):
updater = updater_without_calling_init
hosts, updater.config = fake_config_reader_with_two_hosts
with mock.patch("ipgetter.myip", return_value="1.1.1.1") as mock_my_ip:
updater.run()
for host in hosts:
host.do_update.assert_called_once_with("1.1.1.1")
mock_my_ip.assert_called_once_with()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.0/Lib/test/test_urllibnet.py
|
Python
|
mit
| 6,937
| 0.001586
|
#!/usr/bin/env python
import unittest
from test import support
import socket
import urllib.request
import sys
import os
import email.message
def _open_with_retry(func, host, *args, **kwargs):
# Connecting to remote hosts is flaky. Make it more robust
# by retrying the connection several times.
last_exc = None
for i in range(3):
try:
return func(host, *args, **kwargs)
except IOError as err:
last_exc = err
continue
except:
raise
raise last_exc
class URLTimeoutTest(unittest.TestCase):
TIMEOUT = 10.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
f = _open_with_retry(urllib.request.urlopen, "http://www.python.org/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.reqest.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
    http://www.python.org/ since there are a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
def urlopen(self, *args):
return _open_with_retry(urllib.request.urlopen, *args)
def test_basic(self):
# Simple test expected to pass.
open_url = self.urlopen("http://www.python.org/")
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assert_(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assert_(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_readlines(self):
# Test both readline and readlines.
open_url = self.urlopen("http://www.python.org/")
try:
self.assert_(isinstance(open_url.readline(), bytes),
"readline did not return bytes")
self.assert_(isinstance(open_url.readlines(), list),
"readlines did not return a list")
finally:
open_url.close()
def test_info(self):
# Test 'info'.
open_url = self.urlopen("http://www.python.org/")
try:
info_obj = open_url.info()
finally:
open_url.close()
self.assert_(isinstance(info_obj, email.message.Message),
"object returned by 'info' is not an instance of "
"email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
URL = "http://www.python.org/"
open_url = self.urlopen(URL)
try:
gotten_url = open_url.geturl()
finally:
open_url.close()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.python.org/XXXinvalidXXX"
open_url = urllib.request.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
def test_fileno(self):
if sys.platform in ('win32',):
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
return
# Make sure fd returned by fileno is valid.
open_url = self.urlopen("http://www.python.org/")
fd = open_url.fileno()
FILE = os.fdopen(fd, encoding='utf-8')
try:
self.assert_(FILE.read(), "reading from file created using fd "
"returned by fileno failed")
finally:
FILE.close()
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
self.assertRaises(IOError,
# SF patch 809915: In Sep 2003, VeriSign started
# highjacking invalid .com and .net addresses to
# boost traffic to their own site. This test
# started failing then. One hopes the .invalid
# domain will be spared to serve its defined
# purpose.
# urllib.urlopen, "http://www.sadflkjsasadf.com/")
urllib.request.urlopen,
"http://www.python.invalid./")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.request.urlretrieve using the network."""
def urlretrieve(self, *args):
return _open_with_retry(urllib.request.urlretrieve, *args)
def test_basic(self):
# Test basic functionality.
file_location,info = self.urlretrieve("http://www.python.org/")
self.assert_(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
FILE = open(file_location, encoding='utf-8')
try:
self.assert_(FILE.read(), "reading from the file location returned"
" by urlretrieve failed")
finally:
FILE.close()
os.unlink(file_location)
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
file_location,info = self.urlretrieve("http://www.python.org/",
support.TESTFN)
self.assertEqual(file_location, support.TESTFN)
|
self.assert_(os.path.exists(file_location))
FILE = open(file_location, encoding='utf-8')
try:
self.assert_(FILE.read(), "reading from temporary file failed")
finally:
FILE.close()
os.unlink(file_location)
def test_header(self):
        # Make sure header returned as 2nd value from urlretrieve is good.
file_location, header = self.urlretrieve("http://www.python.org/")
os.unlink(file_location)
self.assert_(isinstance(header, email.message.Message),
"header is not an instance of email.message.Message")
def test_main():
support.requires('network')
support.run_unittest(URLTimeoutTest,
urlopenNetworkTests,
urlretrieveNetworkTests)
if __name__ == "__main__":
test_main()
|
HackCigriculture/cigriculture-ml
|
src/polygon.py
|
Python
|
gpl-3.0
| 2,316
| 0.002159
|
import json
with open('data/78mm.json', 'r') as _78mm:
polygons78 = json.load(_78mm)["features"][0]["geometry"]["geometries"]
with open('data/100mm.json', 'r') as _100mm:
polygons100 = json.load(_100mm)["features"][0]["geometry"]["geometries"]
with open('data/130mm.json', 'r') as _130mm:
polygons130 = json.load(_130mm)["features"][0]["geometry"]["geometries"]
def dot(x1, y1, x2, y2):
return x1*y1+x2*y2
def det(x1, y1, x2, y2):
return x1*y2-x2*y1
def dett(x0, y0, x1, y1, x2, y2):
z = det(x1-x0, y1-y0, x2-x0, y2-y0)
return -1 if z < 0 else z > 0
'''
inline DB ang(cPo p0,cPo p1){return acos(dot(p0,p1)/p0.len()/p1.len());}
def ang(x1, y1, x2, y2):
return
def arg(x1, y1, x2, y2):
DB a=ang(x,y);return~dett(x,y)?a:2*PI-a;}
return
'''
def intersect(lx1, ly1, lx2, ly2, rx1, ry1, rx2, ry2):
return 1 if (dett(lx1, ly1, lx2, ly2, rx1, ry1) * dett(lx1, ly1, lx2, ly2, rx2, ry2) <= 0 and
                 dett(rx1, ry1, rx2, ry2, lx1, ly1) * dett(rx1, ry1, rx2, ry2, lx2, ly2) <= 0) else 0
def within(p, x, y):
z = 0
for i in range(0, len(p)-1):
if x == p[i][0] and y == p[i][1]:
continue
if x == p[i+1][0] and y == p[i+1][1]:
continue
z += intersect(x, y, -3232, -4344, p[i][0], p[i][1], p[i+1][0], p[i+1][1])
return 1 if z % 2 == 1 else 0
def _check(p, d, x, y):
for i in range(0, len(p)):
if within(p[i]["coordinates"][0], x, y):
return [d, i]
return []
def check(x, y):
res = _check(polygons78, 78, x, y)
if len(res) > 0:
return 0.2 # 0.078
res = _check(polygons100, 100, x, y)
if len(res) > 0:
return 0.5 # 0.1
res = _check(polygons130, 130, x, y)
if len(res) > 0:
return 0.8 # 0.13
return 1.0
# init()
# #display()
# #x, y = 121.555764, 24.9833
#
# x, y = 121.565764, 24.9830
# res = check(x, y)
# print res
# if (len(res) > 0):
# if (res[0] == 78):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Red')
# if (res[0] == 100):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Orange')
# if (res[0] == 130):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Yellow')
# plt.plot(x, y, marker='*')
# ax.grid()
# ax.axis('equal')
# plt.show()
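# Hedged sanity check (not part of the original script): point-in-polygon on a
# toy square, using the same ray-casting helpers defined above. The real
# polygons come from the GeoJSON files loaded at the top of the file.
if __name__ == '__main__':
    square = [[0.0, 0.0], [0.0, 2.0], [2.0, 2.0], [2.0, 0.0], [0.0, 0.0]]
    assert within(square, 1.0, 1.0) == 1   # inside the square
    assert within(square, 5.0, 5.0) == 0   # outside the square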
|
oscardbpucp/Comp-Process-STPatterns
|
clean_and_pretreatment/datos_total_fase1v3-mod.py
|
Python
|
gpl-3.0
| 13,307
| 0.007139
|
## Copyright (C) 2017 Oscar Diaz Barriga
## This file is part of Comp-Process-STPatterns.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
# /* Tumbes */
# select count(*) from t_boya_medicion_minpres
# where latitud < -3.392 and latitud > -4.078
# /* Piura */
# select count(*) from t_boya_medicion_minpres
# where latitud < -4.078 and latitud > -6.382
# /* Lambayeque */
# select count(*) from t_boya_medicion_minpres
# where latitud < -6.382 and latitud > -7.177
# /* La Libertad */
# select count(*) from t_boya_medicion_minpres
# where latitud < -7.177 and latitud > -8.9722
# /* Ancash*/
# select count(*) from t_boya_medicion_minpres
# where latitud < -8.9722 and latitud > -10.593
import glob, os
import psycopg2
import datetime
db_user = "USER"
db_host = "IP_ADDRESS"
db_password = "PASSWORD"
output = "./Output/datos_total_boya3_est7_ca1.csv"
class Departamento (object):
def __init__(self, nombre, latitud_min, latitud_max):
self.nombre = nombre
self.latitud_min = latitud_min
self.latitud_max = latitud_max
class Zona (object):
def __init__(self, start_date, end_date, nombre, latitud_min, latitud_max, temperatura, presion, salinidad):
self.start_date = start_date
self.end_date = end_date
self.nombre = nombre
self.latitud_min = latitud_min
self.latitud_max = latitud_max
self.temperatura = temperatura
self.presion = presion
self.salinidad = salinidad
class boya_data (object):
def __init__(self, temperatura, presion, salinidad):
self.temperatura = temperatura
self.presion = presion
self.salinidad = salinidad
class estacion_data (object):
# def __init__(self, temperatura_m, punto_rocio_m, presion_nivel_mar):
# self.est_temperatura_m = temperatura_m
# self.est_punto_rocio_m= punto_rocio_m
# self.est_presion_nivel_mar = presion_nivel_mar
def __init__(self, temperatura_m, punto_rocio_m, presion_nivel_mar,
presion_est_media, velocidad_viento_media, temperatura_maxima,
temperatura_minima):
self.est_temperatura_m = temperatura_m
self.est_punto_rocio_m= punto_rocio_m
self.est_presion_nivel_mar = presion_nivel_mar
self.est_presion_est_media = presion_est_media
self.est_temperatura_minima = temperatura_minima
self.est_temperatura_maxima = temperatura_maxima
self.est_velocidad_viento_media = velocidad_viento_media
class caudal_data (object):
def __init__(self, caudal):
self.caudal = caudal
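## Hedged illustration (not part of the original script): the latitude bands
## from the SQL comments at the top of the file, expressed with the
## Departamento class defined above.
departamentos_costa = [
    Departamento("Tumbes", -4.078, -3.392),
    Departamento("Piura", -6.382, -4.078),
    Departamento("Lambayeque", -7.177, -6.382),
    Departamento("La Libertad", -8.9722, -7.177),
    Departamento("Ancash", -10.593, -8.9722),
]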
def database_select_date_between(start_date, end_date):
try:
conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select count(*) from t_boya_medicion_minpres where latitud < -3.392 and latitud > -4.078 AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
for row in rows:
print " ", row
def database_select_date_between_lat(start_latitud, end_latitud, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select count(*) from t_boya_medicion_minpres where latitud < %s AND latitud > %s AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
for row in rows:
count = row[0]
return count
def database_select_date_between_lat_avg(start_latitud, end_latitud, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select avg(temp), avg(pres), avg(psal) from t_boya_medicion_minpres " \
" where latitud < %s AND latitud > %s AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
b_data = None
for row in rows:
b_data = boya_data(row[0], row[1], row[2])
return b_data
def database_select_date_between_lat_avg_estacion(region, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "Select avg(em.temp_m), avg(em.punto_rocio_m), avg(em.presion_nivel_mar), " \
"avg(em.presion_est_m), avg(em.veloc_viento_m), avg(em.temp_max), avg(em.temp_min) " \
" From t_region r, t_estacion e, t_estacion_medicion em " \
" Where e.id_region = r.id_region AND r.nombre like '%s' " \
" AND em.id_estacion = e.id_estacion " \
" AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
" AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
b_data = None
for row in rows:
b_data = estacion_data(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
return b_data
def database_select_date_between_lat_avg_caudal(region, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = " Select avg(c.caudal) From t_caudal_medicion c " \
" Where c.region like '%s' AND c.caudal != 9999 " \
" AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
" AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
c_data = None
for row in rows:
c_data = caudal_data(row[0])
return c_data
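# Illustrative sketch (added; not part of the original script): one way the
# averaging helpers above could be combined to build a Zona per department for
# a given date range. The helper name and the assumption that latitud_max is
# the upper (less negative) bound are mine; note the queries take the upper
# latitude first (latitud < start_latitud AND latitud > end_latitud).
def build_zona_for_departamento(dep, start_date, end_date):
    boya = database_select_date_between_lat_avg(dep.latitud_max, dep.latitud_min, start_date, end_date)
    return Zona(start_date, end_date, dep.nombre, dep.latitud_min, dep.latitud_max,
                boya.temperatura, boya.presion, boya.salinidad)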
# def count_boyas_range_space_and_time(i, start_date_unix, step_date, latitude, longitude):
# t_start = start_date_unix + i * step_date
# 	t_end = start_date_unix + (i + 1) * step_date
|
klahnakoski/TestLog-ETL
|
vendor/jx_sqlite/schema.py
|
Python
|
mpl-2.0
| 4,659
| 0.001502
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.queries import get_property_name
from jx_sqlite.utils import GUID, untyped_column
from mo_dots import concat_field, relative_field, set_default, startswith_field
from mo_json import EXISTS, OBJECT, STRUCT
from mo_logs import Log
class Schema(object):
"""
A Schema MAPS ALL COLUMNS IN SNOWFLAKE FROM THE PERSPECTIVE OF A SINGLE TABLE (a nested_path)
"""
def __init__(self, nested_path, snowflake):
if nested_path[-1] != '.':
Log.error("Expecting full nested path")
self.path = concat_field(snowflake.fact_name, nested_path[0])
self.nested_path = nested_path
self.snowflake = snowflake
# def add(self, column_name, column):
# if column_name != column.names[self.nested_path[0]]:
# Log.error("Logic error")
#
# self.columns.append(column)
#
# for np in self.nested_path:
# rel_name = column.names[np]
# container = self.namespace.setdefault(rel_name, set())
# hidden = [
# c
# for c in container
# if len(c.nested_path[0]) < len(np)
# ]
# for h in hidden:
# container.remove(h)
#
# container.add(column)
#
# container = self.namespace.setdefault(column.es_column, set())
# container.add(column)
# def remove(self, column_name, column):
# if column_name != column.names[self.nested_path[0]]:
# Log.error("Logic error")
#
# self.namespace[column_name] = [c for c in self.namespace[column_name] if c != column]
def __getitem__(self, item):
output = self.snowflake.namespace.columns.find(self.path, item)
return output
# def __copy__(self):
# output = Schema(self.nested_path)
# for k, v in self.namespace.items():
# output.namespace[k] = copy(v)
# return output
def get_column_name(self, column):
"""
RETURN THE COLUMN NAME, FROM THE PERSPECTIVE OF THIS SCHEMA
:param column:
:return: NAME OF column
"""
relative_name = relative_field(column.name, self.nested_path[0])
return get_property_name(relative_name)
@property
def namespace(self):
return self.snowflake.namespace
def keys(self):
"""
:return: ALL COLUMN NAMES
"""
return set(c.name for c in self.columns)
@property
def columns(self):
        return self.snowflake.namespace.columns.find(self.snowflake.fact_name)
def column(self, prefix):
full_name = untyped_column(concat_field(self.nested_path, prefix))
return set(
c
for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
for k, t in [untyped_column(c.name)]
if k == full_name and k != GUID
if c.jx_type not in [OBJECT, EXISTS]
)
def leaves(self, prefix):
full_name = concat_field(self.nested_path, prefix)
return set(
c
for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
for k in [c.name]
if startswith_field(k, full_name) and k != GUID or k == full_name
if c.jx_type not in [OBJECT, EXISTS]
)
def map_to_sql(self, var=""):
"""
RETURN A MAP FROM THE RELATIVE AND ABSOLUTE NAME SPACE TO COLUMNS
"""
origin = self.nested_path[0]
if startswith_field(var, origin) and origin != var:
var = relative_field(var, origin)
fact_dict = {}
origin_dict = {}
for k, cs in self.namespace.items():
for c in cs:
if c.jx_type in STRUCT:
continue
if startswith_field(get_property_name(k), var):
origin_dict.setdefault(c.names[origin], []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(c.name, []).append(c)
elif origin == var:
origin_dict.setdefault(concat_field(var, c.names[origin]), []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(concat_field(var, c.name), []).append(c)
return set_default(origin_dict, fact_dict)
|
jrichte43/ProjectEuler
|
Problem-0364/solutions.py
|
Python
|
gpl-3.0
| 808
| 0.006188
|
__problem_title__ = "Comfortable distance"
__problem_url___ = "https://projecteuler.net/problem=364"
__problem_description__ = "There are seats in a row. people come after each other to fill the " \
"seats according to the following rules: We can verify that T(10) = " \
"61632 and T(1 000) mod 100 000 007 = 47255
|
094. Find T(1 000 000) mod " \
"100 000 007."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/admin/instances/forms.py
|
Python
|
apache-2.0
| 3,609
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ChoiceField(label=_("New Host"),
required=True,
help_text=_("Choose a Host to migrate to."))
disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
initial=False, required=False)
block_migration = forms.BooleanField(label=_("Block Migration"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
instance_id = initial.get('instance_id')
self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
initial=instance_id)
        self.fields['host'].choices = self.populate_host_choices(request,
                                                                  initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.hypervisor_hostname,
host.hypervisor_hostname)
for host in hosts
if host.hypervisor_hostname != current_host]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available.")))
return sorted(host_list)
def handle(self, request, data):
try:
block_migration = data['block_migration']
disk_over_commit = data['disk_over_commit']
api.nova.server_live_migrate(request,
data['instance_id'],
data['host'],
block_migration=block_migration,
disk_over_commit=disk_over_commit)
msg = _('The instance is preparing the live migration '
'to host "%s".') % data['host']
messages.success(request, msg)
return True
except Exception:
msg = _('Failed to live migrate instance to '
'host "%s".') % data['host']
redirect = reverse('horizon:admin:instances:index')
exceptions.handle(request, msg, redirect=redirect)
|
foone/3dmmInternals
|
generate/lib/pyudd.py
|
Python
|
unlicense
| 17,726
| 0.007221
|
# -*- coding: Latin-1 -*-
#!/usr/bin/env python
"""PyUdd, a python module for OllyDbg .UDD files
Ange Albertini 2010, Public domain
"""
__author__ = 'Ange Albertini'
__contact__ = 'ange@corkami.com'
__revision__ = "$Revision$"
__version__ = '0.1 r%d'
import struct
HDR_STRING = "Mod\x00"
FTR_STRING = "\nEnd"
#TODO: find standard notation to keep it inside init_mappings
udd_formats = [
(11, "Module info file v1.1\x00"),
(20, "Module info file v2.0\x00"),
(20, "Module info file v2.01g\x00"),
]
def init_mappings():
"""initialize constants' mappings"""
format_ids = [
"STRING",
"DDSTRING",
"MRUSTRING",
"EMPTY",
"VERSION",
"DWORD",
"DD2",
"DD2STRING",
"BIN",
"NAME",
"CRC2",
]
F_ = dict([(e, i) for i, e in enumerate(format_ids)])
udd_formats = [
(11, "Module info file v1.1\x00"),
(20, "Module info file v2.0\x00"),
(20, "Module info file v2.01g\x00"),
]
Udd_Formats = dict(
[(e[1], e[0]) for e in udd_formats] +
udd_formats)
#OllyDbg 1.1
chunk_types11 = [
("Header", HDR_STRING, F_["STRING"]),
("Footer", FTR_STRING, F_["EMPTY"]),
("Filename", "\nFil", F_["STRING"]),
("Version", "\nVer", F_["VERSION"]),
("Size", "\nSiz", F_["DWORD"]),
("Timestamp", "\nTst", F_["DD2"]),
("CRC", "\nCcr", F_["DWORD"]),
("Patch", "\nPat", F_["BIN"]),
("Bpc", "\nBpc", F_["BIN"]),
("Bpt", "\nBpt", F_["BIN"]),
("HwBP", "\nHbr", F_["BIN"]),
("Save", "\nSva", F_["BIN"]), # sometimes 4, sometimes 5 ?
("AnalyseHint", "\nAht", F_["BIN"]),
("CMD_PLUGINS", "\nUs0", F_["DDSTRING"]), # multiline, needs escaping
("U_LABEL", "\nUs1", F_["DDSTRING"]),
("A_LABEL", "\nUs4", F_["DDSTRING"]),
("U_COMMENT", "\nUs6", F_["DDSTRING"]),
("BPCOND", "\nUs8", F_["DDSTRING"]),
("ApiArg", "\nUs9", F_["DDSTRING"]),
("USERLABEL", "\nUs1", F_["DDSTRING"]),
("Watch", "\nUsA", F_["DDSTRING"]),
("US2", "\nUs2", F_["BIN"]),
("US3", "\nUs3", F_["BIN"]),
("_CONST", "\nUs5", F_["BIN"]),
("A_COMMENT", "\nUs7", F_["BIN"]),
("FIND?", "\nUsC", F_["BIN"]),
("SOURCE?", "\nUsI", F_["BIN"]),
("MRU_Inspect","\nUs@", F_["MRUSTRING"]),
("MRU_Asm", "\nUsB", F_["MRUSTRING"]),
("MRU_Goto", "\nUsK", F_["MRUSTRING"]), #?
("MRU_Explanation", "\nUs|", F_["MRUSTRING"]), # logging bp explanation
("MRU_Expression", "\nUs{", F_["MRUSTRING"]), # logging bp expression
("MRU_Watch", "\nUsH", F_["MRUSTRING"]),
("MRU_Label", "\nUsq", F_["MRUSTRING"]), #?
("MRU_Comment", "\nUsv", F_["MRUSTRING"]), #?
("MRU_Condition", "\nUsx", F_["MRUSTRING"]), #?
("MRU_CMDLine", "\nCml", F_["STRING"]), #?
("LogExpression", "\nUs;", F_["DDSTRING"]), # logging bp expression
("ANALY_COMM", "\nUs:",
|
F_["DDSTRING"]), #
("US?", "\nUs?", F_["DDSTRING"]), #?
("TracCond", "\nUsM", F_["DDSTRING"]), # tracing condition
("LogExplanation", "\nUs<", F_["DDSTRING"]), # logging bp explanation
("AssumedArgs", "\nUs=", F_["DDSTRING"]), # Assumed arguments
("CFA", "\nCfa", F_["DD2"]), #?
("CFM", "\nCfm", F_["DD2STRIN
|
G"]), #?
("CFI", "\nCfi", F_["DD2"]), #?
("US>", "\nUs>", F_["BIN"]), #?
("ANC", "\nAnc", F_["BIN"]), #?
("JDT", "\nJdt", F_["BIN"]), #?
("PRC", "\nPrc", F_["BIN"]), #?
("SWI", "\nSwi", F_["BIN"]), #?
]
#OllyDbg 2
chunk_types20 = [
("Header", HDR_STRING, F_["STRING"]),
("Footer", FTR_STRING, F_["EMPTY"]),
("Filename", "\nFil", F_["STRING"]),
("Infos", "\nFcr", F_["CRC2"]), #?
("Name", "\nNam", F_["NAME"]), #?
("Data", "\nDat", F_["NAME"]), #?
("MemMap", "\nMba", F_["DDSTRING"]), #?
("LSA", "\nLsa", F_["NAME"]), # MRU entries
("JDT", "\nJdt", F_["BIN"]), #?
("PRC", "\nPrc", F_["BIN"]), #?
("SWI", "\nSwi", F_["BIN"]), #?
("CBR", "\nCbr", F_["BIN"]), #?
("LBR", "\nLbr", F_["BIN"]), #?
("ANA", "\nAna", F_["BIN"]), #?
("CAS", "\nCas", F_["BIN"]), #?
("PRD", "\nPrd", F_["BIN"]), #?
("Save", "\nSav", F_["BIN"]), #?
("RTC", "\nRtc", F_["BIN"]), #?
("RTP", "\nRtp", F_["BIN"]), #?
("Int3", "\nIn3", F_["BIN"]), #?
("MemBP", "\nBpm", F_["BIN"]), #?
("HWBP", "\nBph", F_["BIN"]), #?
]
Chunk_Types11 = dict(
[(e[1], e[0]) for e in chunk_types11] +
[(e[0], e[1]) for e in chunk_types11]
)
Chunk_Types20 = dict(
[(e[1], e[0]) for e in chunk_types20] +
[(e[0], e[1]) for e in chunk_types20]
)
Chunk_Types = {
11: Chunk_Types11,
20: Chunk_Types20
}
# no overlapping of formats yet so they're still merged
#
Chunk_Formats = dict(
[(e[2], e[0]) for e in chunk_types11] +
[(e[0], e[2]) for e in chunk_types11] +
[(e[2], e[0]) for e in chunk_types20] +
[(e[0], e[2]) for e in chunk_types20]
)
olly2cats = [
# used in DATA and NAME
#
('!', "UserLabel"),
('0', "UserComment"),
('1', "Import"),
('2', "APIArg"),
('3', "APICall"),
('4', "Member"),
('6', "Unk6"),
('*', "Struct"),
# only used in LSA ?
#
('`', 'mru_label'),
('a', 'mru_asm'),
('c', 'mru_comment'),
('d', 'watch'),
('e', 'mru_goto'),
('p', 'trace_condition1'),
('q', 'trace_condition2'),
('r', 'trace_condition3'),
('s', 'trace_condition4'),
('t', 'trace_command1'),
('u', 'trace_command2'),
('v', 'protocol_start'),
('w', 'protocol_end'),
('Q', 'log_explanation'),
('R', 'log_condition'),
('S', 'log_expression'),
('U', 'mem_explanation'),
('V', 'mem_condition'),
('W', 'mem_expression'),
('Y', 'hbplog_explanation'),
('Z', 'hbplog_condition'),
('[', 'hbplog_expression'),
]
Olly2Cats = dict(
[(e[1], e[0]) for e in olly2cats] +
olly2cats)
return Udd_Formats, F_, Chunk_Types, Chunk_Formats, Olly2Cats
UDD_FORMATS, F_, CHUNK_TYPES, CHUNK_FORMATS, OLLY2CATS = init_mappings()
def binstr(data):
"""return a stream as hex sequence"""
return " ".join(["%02X" % ord(c) for c in data])
def elbinstr(data):
"""return a stream as hex sequence, ellipsed if too long"""
if len(data) < 10:
return binstr(data)
return "(%i) %s ... %s" % (len(data), binstr(data[:10]), binstr(data[-10:]))
class Error(Exception):
"""custom error class"""
pass
def crc32mpeg(buffer_):
"""computes the CRC32 MPEG of a buffer"""
crc = 0xffffffff
for c in buffer_:
octet = ord(c)
for i in range(8):
topbit = crc & 0x80000000
if octet & (0x80 >> i):
topbit ^= 0x80000000
crc <<= 1
if topbit:
crc ^= 0x4c11db7
crc &= 0xffffffff
return crc
def getcrc(filename):
"""returns the UDD crc of a file, by its filename"""
# probably not always correct
import pefile
pe = pefile.PE(filename)
sec = pe.sections[0]
align = pe.OPTIONAL_HEADER.SectionAlignment
data = sec.get_data(sec.VirtualAddress)
ActualSize = max(sec.Misc_VirtualSize, sec.SizeOfRawData)
data += "\0" * (ActualSize - len(data))
rem = ActualSize % align
if rem:
data += "\0" * (align - rem)
return crc32mpeg(data)
def getTimestamp(filename):
"""read LastModified timestamp and return as a binar
|
bewest/glucodump
|
glucodump/stream.py
|
Python
|
gpl-2.0
| 3,981
| 0.017332
|
#!/usr/bin/python
import sys, os
import select, socket
import usbcomm
import usb
_default_host = 'localhost'
_default_port = 23200
_READ_ONLY = select.POLLIN | select.POLLPRI
class Stream(object):
def __init__(self,
host=_default_host,
port=_default_port):
self.host = host
self.port = port
self.usb = usbcomm.USBComm(idVendor=usbcomm.ids.Bayer, idProduct=usbcomm.ids.Bayer.Contour)
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setblocking(0)
self.poller = select.poll()
self.fd_to_socket = {}
self.clients = []
def close(self):
print >>sys.stderr, '\nMUX > Closing...'
for client in self.clients:
client.close()
self.usb.close()
self.server.close()
print >>sys.stderr, 'MUX > Done! =)'
def add_client(self, client):
print >>sys.stderr, 'MUX > New connection from', client.getpeername()
client.setblocking(0)
self.fd_to_socket[client.fileno()] = client
self.clients.append(client)
self.poller.register(client, _READ_ONLY)
def remove_client(self, client, why='?'):
try:
name = client.getpeername()
except:
name = 'client %d' % client.fileno()
print >>sys.stderr, 'MUX > Closing %s: %s' % (name, why)
self.poller.unregister(client)
self.clients.remove(client)
client.close()
def read(self):
self.sink = None
try:
data = self.usb.read( )
self.sink = data
except usb.core.USBError, e:
if e.errno != 110:
print e, dir(e), e.backend_error_code, e.errno
raise
return self.sink is not None
def flush(self):
if self.sink is not None:
for client in self.clients:
client.send(self.sink)
self.sink = None
def run(self):
try:
# self.tty.setTimeout(0) # Non-blocking
# self.tty.flushInput()
# self.tty.flushOutput()
# self.poller.register(self.usb.epout.bEndpointAddress, _READ_ONLY)
# self.fd_to_socket[self.usb.epout.bEndpointAddress] = self.usb
# print >>sys.stderr, 'MUX > Serial port: %s @ %s' % (self.device, self.baudrate)
print >>sys.stderr, 'MUX > usb port: %s' % (self.usb)
self.server.bind((self.host, self.port))
self.server.listen(5)
self.poller.register(self.server, _READ_ONLY)
self.fd_to_socket[self.server.fileno()] = self.server
print >>sys.stderr, 'MUX > Server: %s:%d' % self.server.getsockname()
print >>sys.stderr, 'MUX > Use ctrl+c to stop...\n'
while True:
events = self.poller.poll(500)
if self.read( ):
self.flush( )
for fd, flag in events:
# Get socket from fd
s = self.fd_to_socket[fd]
print fd, flag, s
if flag & select.POLLHUP:
self.remove_client(s, 'HUP')
elif flag & select.POLLERR:
self.remove_client(s, 'Received error')
                    elif flag & (_READ_ONLY):
# A readable server socket is ready to accept a connection
if s is self.server:
connection, client_address = s.accept()
self.add_client(connection)
# Data from serial port
elif s is self.usb:
data = s.read( )
for client in self.clients:
client.send(data)
# Data from client
else:
data = s.recv(80)
# Client has data
print "send to usb"
if data: self.usb.write(data)
# Interpret empty result as closed connection
else: self.remove_client(s, 'Got no data')
except usb.core.USBError, e:
print >>sys.stderr, '\nMUX > USB error: "%s". Closing...' % e
except socket.error, e:
print >>sys.stderr, '\nMUX > Socket error: %s' % e.strerror
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.close()
if __name__ == '__main__':
s = Stream( )
s.run( )
|
mcallaghan/tmv
|
BasicBrowser/scoping/migrations/0048_auto_20170209_1708.py
|
Python
|
gpl-3.0
| 658
| 0.00152
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-09 17:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scoping', '0047_auto_20170209_1626'),
]
operations = [
migrations.RemoveField(
model_name='query',
name='technology',
),
migrations.AddField(
model_name='query',
name='technology',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scoping.Technology'),
),
]
|
datacommonsorg/tools
|
stat_var_renaming/stat_var_renaming_constants.py
|
Python
|
apache-2.0
| 13,060
| 0.001914
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
This file contains various constants and helper code to generate constants
that are used in the Statistical Variable renaming.
"""
import pandas as pd
import collections
import re
def capitalizeFirst(word):
""
|
" Capitalizes the first letter of a string. """
return word[0].upper() + word[1:]
def standard_name_remapper(orig_name):
""" General renaming function for long strings into Pascal case.
Text inbetween trailing parentheses is removed.
Commas, dashes, and "ands" are removed. Then string is converted into Pascal
case without and spaces present.
"""
# Remove any trailing parentheses.
# TODO(tjann): to check if this is safe.
paren_start = orig_name.find("(")
if paren_start != -1:
orig_name = orig_name[:paren_start]
# Removes separating words.
orig_name = orig_name.replace(",", " ")
orig_name = orig_name.replace("-", " ")
orig_name = orig_name.replace("and ", "")
return "".join([word.capitalize() for word in orig_name.split()])
def _create_naics_map():
""" Downloads all NAICS codes across long and short form codes. """
# Read in list of industry topics.
naics_codes = pd.read_excel(
"https://www.census.gov/eos/www/naics/2017NAICS/2-6%20digit_2017_Codes.xlsx"
)
naics_codes = naics_codes.iloc[:, [1, 2]]
naics_codes.columns = ['NAICSCode', 'Title']
# Replace all ranges with individual rows. E.g. 31-33 -> 31, 32, 33.
def range_to_array(read_code):
if isinstance(read_code, str) and "-" in read_code:
lower, upper = read_code.split("-")
return list(range(int(lower), int(upper) + 1))
return read_code
naics_codes = naics_codes.dropna()
naics_codes['NAICSCode'] = naics_codes['NAICSCode'].apply(range_to_array)
naics_codes = naics_codes.explode('NAICSCode')
# Add unclassified code which is used in some statistical variables.
naics_codes = naics_codes.append(
{
"NAICSCode": 99,
"Title": "Nonclassifiable"
}, ignore_index=True)
# Query for only two digit codes.
short_codes = naics_codes[naics_codes['NAICSCode'] < 100]
short_codes = short_codes.set_index("NAICSCode")
short_codes = short_codes['Title'].to_dict()
# Read in overview codes.
overview_codes = pd.read_csv(
"https://data.bls.gov/cew/doc/titles/industry/high_level_industries.csv"
)
overview_codes.columns = ["NAICSCode", "Title"]
overview_codes = overview_codes.set_index("NAICSCode")
overview_codes = overview_codes['Title'].to_dict()
# Combine the two sources of codes.
NAICS_MAP = {}
combined_codes = short_codes
combined_codes.update(overview_codes)
# Rename industries into Pascal case.
for code, orig_name in combined_codes.items():
NAICS_MAP[str(code)] = standard_name_remapper(orig_name)
# Other edge cases.
NAICS_MAP['00'] = 'Unclassified'
return NAICS_MAP
# TODO(iancostello): Consider adding function memoization.
NAICS_MAP = _create_naics_map()
### True Constants
# Template of Stat Var MCF.
TEMPLATE_STAT_VAR = """
Node: dcid:{human_readable_dcid}
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
{CONSTRAINTS}"""
# Main query for stat vars. Combines across populations and observations
# to create statistical variables.
QUERY_FOR_ALL_STAT_VARS = """
SELECT DISTINCT
SP.population_type as populationType,
{CONSTRAINTS}
{POPULATIONS}
O.measurement_qualifier AS measurementQualifier,
O.measurement_denominator as measurementDenominator,
O.measured_prop as measuredProp,
O.unit as unit,
O.scaling_factor as scalingFactor,
O.measurement_method as measurementMethod,
SP.num_constraints as numConstraints,
CASE
WHEN O.measured_value IS NOT NULL THEN "measuredValue"
WHEN O.sum_value IS NOT NULL THEN "sumValue"
WHEN O.mean_value IS NOT NULL THEN "meanValue"
WHEN O.min_value IS NOT NULL THEN "minValue"
WHEN O.max_value IS NOT NULL THEN "maxValue"
WHEN O.std_deviation_value IS NOT NULL THEN "stdDeviationValue"
WHEN O.growth_rate IS NOT NULL THEN "growthRate"
WHEN O.median_value IS NOT NULL THEN "medianValue"
ELSE "Unknown"
END AS statType
FROM
`google.com:datcom-store-dev.dc_v3_clustered.StatisticalPopulation`
AS SP JOIN
`google.com:datcom-store-dev.dc_v3_clustered.Observation`
AS O
ON (SP.id = O.observed_node_key)
WHERE
O.type <> "ComparativeObservation"
AND SP.is_public
AND SP.prov_id NOT IN ({comma_sep_prov_blacklist})
"""
# Dataset blacklist.
_BIO_DATASETS = frozenset([
'dc/p47rsv3', # UniProt
'dc/0cwj4g1', # FDA_Pharmacologic_Class
'dc/5vxrbh3', # SIDER
'dc/ff08ks', # Gene_NCBI
'dc/rhjyj31', # MedicalSubjectHeadings
'dc/jd648v2', # GeneticVariantClinVar
'dc/x8m41b1', # ChEMBL
'dc/vbyjkh3', # SPOKESymptoms
'dc/gpv9pl2', # DiseaseOntology
'dc/8nwtbj2', # GTExSample0
'dc/t5lx1e2', # GTExSample2
'dc/kz0q1c2', # GTExSample1
'dc/8xcvhx', # GenomeAssemblies
'dc/hgp9hn1', # Species
'dc/9llzsx1', # GeneticVariantUCSC
'dc/f1fxve1', # Gene_RNATranscript_UCSC
'dc/mjgrfc', # Chromosome
'dc/h2lkz1', # ENCODEProjectSample
])
_MISC_DATASETS = frozenset([
'dc/93qydx3', # NYBG
'dc/g3rq1f1', # DeepSolar
'dc/22t2hr3', # EIA_860
'dc/zkhvp12', # OpportunityInsightsOutcomes
'dc/89fk9x3', # CollegeScorecard
])
# List of constraint prefixes to remove from certain properties.
CONSTRAINT_PREFIXES_TO_STRIP = {
'nativity': 'USC',
'age': 'USC',
'institutionalization': 'USC',
'educationStatus': 'USC',
'povertyStatus': 'USC',
'workExperience': 'USC',
'nativity': 'USC',
'race': ['USC', 'CDC', 'DAD'],
'employment': ['USC', 'BLS'],
'employmentStatus': ['USC', 'BLS'],
'schoolGradeLevel': 'NCES',
'patientRace': 'DAD'
}
# List of drug renamings. Note that some drugs are intentionally excluded.
DRUG_REMAPPINGS = {
'drug/dea/1100': 'Amphetamine',
'drug/dea/1105B': 'DlMethamphetamine',
'drug/dea/1105D': 'DMethamphetamine',
'drug/dea/1205': 'Lisdexamfetamine',
'drug/dea/1248': 'Mephedrone',
'drug/dea/1615': 'Phendimetrazine',
'drug/dea/1724': 'Methylphenidate',
'drug/dea/2010': 'GammaHydroxybutyricAcid',
'drug/dea/2012': 'FDAApprovedGammaHydroxybutyricAcidPreparations',
'drug/dea/2100': 'BarbituricAcidDerivativeOrSalt',
'drug/dea/2125': 'Amobarbital',
'drug/dea/2165': 'Butalbital',
'drug/dea/2270': 'Pentobarbital', # Intentionally duplicated
'drug/dea/2285': 'Phenobarbital', #
'drug/dea/2315': 'Secobarbital',
'drug/dea/2765': 'Diazepam',
'drug/dea/2783': 'Zolpidem',
'drug/dea/2885': 'Lorazepam',
'drug/dea/4000': 'AnabolicSteroids',
'drug/dea/4187': 'Testosterone',
'drug/dea/7285': 'Ketamine',
'drug/dea/7315D': 'Lysergide',
'drug/dea/7365': 'MarketableOralDronabinol',
'drug/dea/7369': 'DronabinolGelCapsule',
'drug/dea/7370': 'Tetrahydrocannabinol',
'drug/dea/7377': 'Cannabicyclol',
'drug/dea/7379': 'Nabilone',
'drug/dea/7381': 'Mescaline',
'drug/dea/7400': '34Methylenedioxyamphetamine',
'drug/dea/7431': '5MethoxyNNDimethyltryptamine',
'drug/dea/7433': 'Bufotenine',
'drug/dea/7437': 'Psilocybin',
'drug/dea/7438': 'Psilocin',
'drug/dea/7455': 'PCE',
'drug/dea/7471': 'Phencyclidine',
'drug/dea/7540': 'Methylone',
'drug/dea/9010': 'Alphaprodine',
'drug/dea/9020': 'Anileridine',
'drug/dea/9041L': 'Co
|
opnsense/core
|
src/opnsense/service/configd_ctl.py
|
Python
|
bsd-2-clause
| 5,598
| 0.004287
|
#!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
    --------------------------------------------------------------------------------------
package : configd
function: commandline tool to send commands to configd (response to stdout)
"""
import argparse
import socket
import os.path
import traceback
import sys
import syslog
import time
from select import select
from modules import syslog_error, syslog_notice
__author__ = 'Ad Schellevis'
configd_socket_name = '/var/run/configd.socket'
configd_socket_wait = 20
def exec_config_cmd(exec_command):
""" execute command using configd socket
:param exec_command: command string
:return: string
"""
# Create and open unix domain socket
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(configd_socket_name)
except socket.error:
syslog_error('unable to connect to configd socket (@%s)'%configd_socket_name)
print('unable to connect to configd socket (@%s)'%configd_socket_name, file=sys.stderr)
return None
try:
sock.send(exec_command.encode())
data = []
while True:
line = sock.recv(65536).decode()
if line:
data.append(line)
else:
break
return ''.join(data)[:-3]
except:
syslog_error('error in configd communication \n%s'%traceback.format_exc())
        print ('error in configd communication, see syslog for details', file=sys.stderr)
finally:
sock.close()
parser = argparse.ArgumentParser()
parser.add_argument("-m", help="execute multiple arguments at once", action="store_true")
parser.add_argument("-e", help="use as event handler, execute command on receiving input", action="store_true")
parser.add_argument("-d", help="detach the execution of the command and return immediately", action="store_true")
parser.add_argument("-q", help="run quietly by muting standard output", action="store_true")
parser.add_argument(
"-t",
help="threshold between events, wait this interval before executing commands, combine input into single events",
type=float
)
parser.add_argument("command", help="command(s) to execute", nargs="+")
args = parser.parse_args()
syslog.openlog("configctl")
# set a timeout to the socket
socket.setdefaulttimeout(120)
# check if configd socket exists
# (wait for a maximum of "configd_socket_wait" seconds for configd to start)
i=0
while not os.path.exists(configd_socket_name):
if i >= configd_socket_wait:
break
time.sleep(1)
i += 1
if not os.path.exists(configd_socket_name):
print('configd socket missing (@%s)'%configd_socket_name, file=sys.stderr)
sys.exit(-1)
# command(s) to execute
if args.m:
# execute multiple commands at once ( -m "action1 param .." "action2 param .." )
exec_commands=args.command
else:
# execute single command sequence
exec_commands=[' '.join(args.command)]
if args.e:
# use as event handler, execute configd command on every line on stdin
last_message_stamp = time.time()
stashed_lines = list()
while True:
rlist, _, _ = select([sys.stdin], [], [], args.t)
if rlist:
last_message_stamp = time.time()
r_line = sys.stdin.readline()
if len(r_line) == 0:
#EOFError. pipe broken?
sys.exit(-1)
stashed_lines.append(r_line)
if len(stashed_lines) >= 1 and (args.t is None or time.time() - last_message_stamp > args.t):
# emit event trigger(s) to syslog
for line in stashed_lines:
syslog_notice("event @ %.2f msg: %s" % (last_message_stamp, line))
# execute command(s)
for exec_command in exec_commands:
syslog_notice("event @ %.2f exec: %s" % (last_message_stamp, exec_command))
exec_config_cmd(exec_command=exec_command)
stashed_lines = list()
else:
# normal execution mode
for exec_command in exec_commands:
if args.d:
exec_command = '&' + exec_command
result=exec_config_cmd(exec_command=exec_command)
if result is None:
sys.exit(-1)
if not args.q:
print('%s' % (result.strip()))
|
Geoportail-Luxembourg/geoportailv3
|
geoportal/geoportailv3_geoportal/admin/admin.py
|
Python
|
mit
| 178
| 0
|
from c2cgeoportal_admin.views.layertree import itemtypes_tables
itemtypes_tables.update({
    'lu_int_wms': 'lux_layer_internal_wms',
'lu_ext_wms': 'lux_layer_external_wms',
})
| |
google/starthinker
|
examples/dcm_run_example.py
|
Python
|
apache-2.0
| 3,107
| 0.011265
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dcm.run import dcm
def recipe_dcm_run(config, auth_read, account, report_id, report_name):
"""Trigger a CM report run
Args:
auth_read (authentication) - Credentials used for reading data.
account (integer) - CM network id.
report_id (integer) - CM report id, empty if using name.
report_name (string) - CM report name, empty if using id instead.
"""
dcm(config, {
'auth':auth_read,
'report_run_only':True,
'report':{
'account':account,
'report_id':report_id,
'name':report_name
}
})
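# Illustrative call (added; not part of the generated example); the network and
# report ids below are made-up placeholders:
#   recipe_dcm_run(config, 'user', 12345, 67890, '')
# would trigger a run of CM report 67890 under network 12345 using user credentials.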
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Trigger a CM report run
1. Specify an account id.
2. Specify either report name or report id to run.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the step
|
s as they happen.", action="store_true")
parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
parser.add_argument("-account", help="CM network id.", default='')
parser.add_argument("-report_id", help="CM report id, empty if using name.", d
|
efault='')
parser.add_argument("-report_name", help="CM report name, empty if using id instead.", default='')
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_dcm_run(config, args.auth_read, args.account, args.report_id, args.report_name)
|
nop33/indico
|
indico/modules/cephalopod/blueprint.py
|
Python
|
gpl-3.0
| 1,345
| 0.003717
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.cephalopod.controllers import RHCephalopod, RHCephalopodSync, RHSystemInfo
from indico.web.flask.wrappers import IndicoBlueprint
cephalopod_blueprint = _bp = IndicoBlueprint('cephalopod', __name__, template_folder='templates',
virtual_template_folder='cephalopod')
_bp.add_url_rule('/admin/community-hub/', 'index', RHCephalopod, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/community-hub/sync', 'sync', RHCephalopodSync, methods=('POST',))
_bp.add_url_rule('/system-info', 'system-info', RHSystemInfo)
|
cjgrady/compression
|
src/matrix/matrix.py
|
Python
|
gpl-2.0
| 3,131
| 0.023315
|
"""
@summary: Module containing matrix base classes
@author: CJ Grady
@version: 1.0
@status: alpha
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
class Grid(object):
"""
@summary: Base class for Lifemapper grids. This class can be used with
uncompressed grids.
"""
# ...........................
def __init__(self, griddedData=None):
if griddedData is not None:
self._initFromGrid(griddedData)
else:
self.ySize = None
self.xSize = None
self.data = []
self.classes = set([])
# ...........................
def _initFromGrid(self, griddedData):
self.ySize = len(griddedData)
self.xSize = len(griddedData[0])
self.data = griddedData
self.findClasses()
# ...........................
def findClasses(self):
"""
@summary: Finds all of the unique classes in the data
"""
self.classes = set([])
for row in self.data:
for col in row:
self.classes.add(col)
# ...........................
def query(self, x, y):
return self.data[y][x]
# ...........................
def read(self, fn):
self.data = []
with open(fn) as f:
for line in f:
self.data.append([int(i) for i in line.split(' ')])
# ...........................
def write(self, fn):
with open(fn, 'w') as f:
for row in self.data:
f.write('%s\n' % ' '.join([str(i) for i in row]))
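   # Illustrative usage (comment added; not in the original):
   #    g = Grid([[0, 1], [1, 0]])
   #    g.query(1, 0) returns 1   (query takes (x, y), i.e. data[y][x])
   #    g.classes is set([0, 1])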
# .............................................................................
class _CompressedGrid(Grid):
# ...........................
def __init__(self):
raise Exception, "init must be implemented in sub class"
# ...........................
def query(self, x, y):
raise Exception, "Query must be implemented in sub class"
# ...........................
def read(self, fn):
raise Exception, "Read must be implemented in sub class"
# ...........................
def write(self, fn):
raise Exception, "Write must be implemented in sub class"
|
PolyCortex/pyMuse
|
pymuse/pipeline.py
|
Python
|
mit
| 2,987
| 0.001339
|
from pymuse.pipelinestages.pipeline_stage import PipelineStage
from pymuse.utils.stoppablequeue import StoppableQueue
from pymuse.signal import Signal
from pymuse.constants import PIPELINE_QUEUE_SIZE
class PipelineFork():
"""
    This class is used to fork a Pipeline. E.g.: PipelineFork([stage1, stage2], [stage3]) forks the pipeline
    into two paths and gives it two outputs (stage2 and stage3). It is used during the construction of a Pipeline.
"""
def __init__(self, *branches):
self.forked_branches: list = list(branches)
class Pipeline():
"""
    This class creates a multithreaded pipeline. It automatically links together all contiguous stages.
E.g.: Pipeline(Signal(), PipelineStage(), PipelineFork([PipelineStage(), PipelineStage()], [PipelineStage()] ))
"""
def __init__(self, input_signal: Signal, *stages):
self._output_queues = []
self._stages: list = list(stages)
self._link_stages(self._stages)
self._stages[0]._queue_in = input_signal.signal_queue
def get_output_queue(self, queue_index=0) -> StoppableQueue:
"""Return a ref to the queue given by queue_index"""
return self._output_queues[queue_index]
def read_output_queue(self, queue_index=0):
"""Wait to read a data in a queue gi
|
ven by queue_index"""
return self._output_queues[queue_index].get()
def start(self):
"""Start all pipelines stages."""
self._start(self._stages)
def shutdown(self):
""" shutdowns every child thread (PipelineStage)"""
self._shutdown(self._stages)
def join(self):
"""Ensure every thread (PipelineStage) of the pipeline are done"""
for stage in self._stages:
stage.join()
def _link_pipeline_fork(self, stages: list, index: int):
for fork in stages[index].forked_branches:
stages[index - 1].add_queue_out(fork[0].queue_in)
self._link_stages(fork)
def _link_stages(self, stages: list):
for i in range(1, len(stages)):
if type(stages[i]) == PipelineFork:
self._link_pipeline_fork(stages, i)
else:
stages[i - 1].add_queue_out(stages[i].queue_in)
if issubclass(type(stages[-1]), PipelineStage):
output_queue = StoppableQueue(PIPELINE_QUEUE_SIZE)
stages[-1].add_queue_out(output_queue)
self._output_queues.append(output_queue)
def _start(self, stages: list):
for stage in stages:
if type(stage) == PipelineFork:
for forked_branch in stage.forked_branches:
self._start(forked_branch)
else:
stage.start()
def _shutdown(self, stages: list):
for stage in stages:
if type(stage) == PipelineFork:
for forked_branch in stage.forked_branches:
self._shutdown(forked_branch)
else:
stage.shutdown()
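# Illustrative sketch (added; not part of the original module), assuming `signal`
# is a Signal instance and StageA/StageB are concrete PipelineStage subclasses:
#
#   pipeline = Pipeline(signal, StageA(), StageB())
#   pipeline.start()
#   sample = pipeline.read_output_queue()   # blocks until StageB emits data
#   pipeline.shutdown()
#   pipeline.join()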
|
openstack/trove
|
trove/guestagent/common/guestagent_utils.py
|
Python
|
apache-2.0
| 5,574
| 0
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import abc
import os
import re
from trove.common import cfg
from trove.common import pagination
from trove.common import utils
from trove.guestagent.common import operating_system
CONF = cfg.CONF
def update_dict(updates, target):
"""Recursively update a target dictionary with given updates.
Updates are provided as a dictionary of key-value pairs
where a value can also be a nested dictionary in which case
its key is treated as a sub-section of the outer key.
If a list value is encountered the update is applied
iteratively on all its items.
:returns: Will always return a dictionary of results (may be empty).
"""
if target is None:
target = {}
if isinstance(target, list):
for index, item in enumerate(target):
target[index] = update_dict(updates, item)
return target
if updates is not None:
for k, v in updates.items():
if isinstance(v, abc.Mapping):
target[k] = update_dict(v, target.get(k, {}))
else:
target[k] = updates[k]
return target
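# Illustrative example (comment added; not in the original):
#   update_dict({'a': {'b': 2}}, {'a': {'b': 1, 'c': 3}}) returns {'a': {'b': 2, 'c': 3}}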
def expand_dict(target, namespace_sep='.'):
"""Expand a flat dict to a nested one.
This is an inverse of 'flatten_dict'.
:seealso: flatten_dict
"""
nested = {}
for k, v in target.items():
sub = nested
keys = k.split(namespace_sep)
for key in keys[:-1]:
sub = sub.setdefault(key, {})
sub[keys[-1]] = v
return nested
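# Illustrative example (comment added; not in the original):
#   expand_dict({'ns1.ns2a': True, 'ns1.ns2b': 10}) returns {'ns1': {'ns2a': True, 'ns2b': 10}}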
def flatten_dict(target, namespace_sep='.'):
"""Flatten a nested dict.
Return a one-level dict with all sub-level keys joined by a namespace
separator.
The following nested dict:
{'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}
would be flattened to:
{'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}
"""
def flatten(target, keys, namespace_sep):
flattened = {}
if isinstance(target, abc.Mapping):
for k, v in target.items():
flattened.update(
                    flatten(v, keys + [k], namespace_sep))
else:
ns = namespace_sep.join(keys)
flattened[ns] = target
return flattened
return flatten(target, [], namespace_sep)
def build_file_path(base_dir, base_name, *extensions):
"""Build a path to a file in a given directory.
The file may have an extension(s).
    :returns: Path such as: 'base_dir/base_name.ext1.ext2.ext3'
"""
file_name = os.extsep.join([base_name] + list(extensions))
return os.path.expanduser(os.path.join(base_dir, file_name))
def to_bytes(value):
"""Convert numbers with a byte suffix to bytes.
"""
if isinstance(value, str):
pattern = re.compile(r'^(\d+)([K,M,G]{1})$')
match = pattern.match(value)
if match:
value = match.group(1)
suffix = match.group(2)
factor = {
'K': 1024,
'M': 1024 ** 2,
'G': 1024 ** 3,
}[suffix]
return int(round(factor * float(value)))
return value
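# Illustrative example (comment added; not in the original):
#   to_bytes('2K') returns 2048 and to_bytes('1M') returns 1048576; values
#   without a recognized suffix (or non-string values) are returned unchanged.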
def paginate_list(li, limit=None, marker=None, include_marker=False):
"""Paginate a list of objects based on the name attribute.
:returns: Page sublist and a marker (name of the last item).
"""
return pagination.paginate_object_list(
li, 'name', limit=limit, marker=marker, include_marker=include_marker)
def serialize_list(li, limit=None, marker=None, include_marker=False):
"""
Paginate (by name) and serialize a given object list.
:returns: A serialized and paginated version of a given list.
"""
page, next_name = paginate_list(li, limit=limit, marker=marker,
include_marker=include_marker)
return [item.serialize() for item in page], next_name
def get_filesystem_volume_stats(fs_path):
try:
stats = os.statvfs(fs_path)
except OSError:
raise RuntimeError("Filesystem not found (%s)" % fs_path)
total = stats.f_blocks * stats.f_bsize
free = stats.f_bfree * stats.f_bsize
# return the size in GB
used_gb = utils.to_gb(total - free)
total_gb = utils.to_gb(total)
output = {
'block_size': stats.f_bsize,
'total_blocks': stats.f_blocks,
'free_blocks': stats.f_bfree,
'total': total_gb,
'free': free,
'used': used_gb
}
return output
def get_conf_dir():
"""Get the config directory for the database related settings.
For now, the files inside the config dir are mainly for instance rebuild.
"""
mount_point = CONF.get(CONF.datastore_manager).mount_point
conf_dir = os.path.join(mount_point, 'conf.d')
if not operating_system.exists(conf_dir, is_directory=True, as_root=True):
operating_system.ensure_directory(conf_dir, as_root=True)
return conf_dir
|
mathLab/RBniCS
|
tests/unit/backends/dolfin/test_tensor_io.py
|
Python
|
lgpl-3.0
| 5,343
| 0.001684
|
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import pytest
from numpy import isclose
from dolfin import (assemble, dx, FiniteElement, FunctionSpace, inner, MixedElement, split, TestFunction,
TrialFunction, UnitSquareMesh, VectorElement)
from dolfin_utils.test import fixture as module_fixture
from rbnics.backends.dolfin import evaluate as _evaluate, ParametrizedTensorFactory
from rbnics.backends.dolfin.export import tensor_save
from rbnics.backends.dolfin.import_ import tensor_load
from rbnics.eim.utils.decorators import add_to_map_from_parametrized_expression_to_problem
# Meshes
@module_fixture
def mesh():
return UnitSquareMesh(10, 10)
# Forms: elliptic case
def generate_elliptic_linear_form_space(mesh):
return (FunctionSpace(mesh, "Lagrange", 2), )
def generate_elliptic_linear_form(V):
v = TestFunction(V)
return v * dx
def generate_elliptic_bilinear_form_space(mesh):
return generate_elliptic_linear_form_space(mesh) + generate_elliptic_linear_form_space(mesh)
def generate_elliptic_bilinear_form(V1, V2):
assert V1.ufl_element() == V2.ufl_element()
u = TrialFunction(V1)
v = TestFunction(V2)
return u * v * dx
# Forms: mixed case
def generate_mixed_linear_form_space(mesh):
    element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
    element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_0, element_1)
return (FunctionSpace(mesh, element), )
def generate_mixed_linear_form(V):
v = TestFunction(V)
(v_0, v_1) = split(v)
return v_0[0] * dx + v_0[1] * dx + v_1 * dx
def generate_mixed_bilinear_form_space(mesh):
return generate_mixed_linear_form_space(mesh) + generate_mixed_linear_form_space(mesh)
def generate_mixed_bilinear_form(V1, V2):
assert V1.ufl_element() == V2.ufl_element()
u = TrialFunction(V1)
v = TestFunction(V2)
(u_0, u_1) = split(u)
(v_0, v_1) = split(v)
return inner(u_0, v_0) * dx + u_1 * v_1 * dx + u_0[0] * v_1 * dx + u_1 * v_0[1] * dx
# Forms: collapsed case
def generate_collapsed_linear_form_space(mesh):
element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_0, element_1)
U = FunctionSpace(mesh, element)
V = U.sub(0).collapse()
return (V, )
def generate_collapsed_linear_form(V):
v = TestFunction(V)
return v[0] * dx + v[1] * dx
def generate_collapsed_bilinear_form_space(mesh):
element_0 = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_1 = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_0, element_1)
U = FunctionSpace(mesh, element)
V = U.sub(0).collapse()
return (V, U)
def generate_collapsed_bilinear_form(V, U):
u = TrialFunction(U)
(u_0, u_1) = split(u)
v = TestFunction(V)
return inner(u_0, v) * dx + u_1 * v[0] * dx
# Forms decorator
generate_form_spaces_and_forms = pytest.mark.parametrize("generate_form_space, generate_form", [
(generate_elliptic_linear_form_space, generate_elliptic_linear_form),
(generate_elliptic_bilinear_form_space, generate_elliptic_bilinear_form),
(generate_mixed_linear_form_space, generate_mixed_linear_form),
(generate_mixed_bilinear_form_space, generate_mixed_bilinear_form),
(generate_collapsed_linear_form_space, generate_collapsed_linear_form),
(generate_collapsed_bilinear_form_space, generate_collapsed_bilinear_form)
])
# Mock problem to avoid triggering an assert
class Problem(object):
mu = None
def evaluate(tensor):
add_to_map_from_parametrized_expression_to_problem(tensor, Problem())
return _evaluate(tensor)
# Prepare tensor storage for load
class Generator(object):
def __init__(self, form):
self._form = form
def zero_for_load(form):
tensor = assemble(form, keep_diagonal=True)
tensor.zero()
tensor.generator = Generator(form)
return tensor
# Tests
@generate_form_spaces_and_forms
def test_tensor_save(mesh, generate_form_space, generate_form, save_tempdir):
space = generate_form_space(mesh)
form = generate_form(*space)
tensor = ParametrizedTensorFactory(form)
evaluated_tensor = evaluate(tensor)
tensor_save(evaluated_tensor, save_tempdir, "evaluated_tensor")
@generate_form_spaces_and_forms
def test_tensor_load(mesh, generate_form_space, generate_form, load_tempdir):
space = generate_form_space(mesh)
form = generate_form(*space)
tensor = ParametrizedTensorFactory(form)
expected_evaluated_tensor = evaluate(tensor)
loaded_evaluated_tensor = zero_for_load(form)
tensor_load(loaded_evaluated_tensor, load_tempdir, "evaluated_tensor")
assert len(space) in (1, 2)
if len(space) == 1:
assert isclose(loaded_evaluated_tensor.get_local(), expected_evaluated_tensor.get_local()).all()
elif len(space) == 2:
assert isclose(loaded_evaluated_tensor.array(), expected_evaluated_tensor.array()).all()
@generate_form_spaces_and_forms
def test_tensor_io(mesh, generate_form_space, generate_form, tempdir):
test_tensor_save(mesh, generate_form_space, generate_form, tempdir)
test_tensor_load(mesh, generate_form_space, generate_form, tempdir)
|
timberline-secondary/hackerspace
|
src/profile_manager/migrations/0005_profile_get_messages_by_email.py
|
Python
|
gpl-3.0
| 484
| 0.002066
|
# Generated by Django 2.0.13 on 2019-08-10 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile_manager', '0004_auto_20190729_2101'),
]
operations = [
migrations.AddField(
model_name='profile',
            name='get_messages_by_email',
field=models.BooleanField(default=True, help_text='If your teacher sends you a message, get an instance email.'),
),
]
|
zbyte64/django-dockit
|
dockit/backends/djangodocument/migrations/0002_auto__chg_field_registeredindex_query_hash.py
|
Python
|
bsd-3-clause
| 7,475
| 0.007358
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'RegisteredIndex.query_hash'
db.alter_column('djangodocument_registeredindex', 'query_hash', self.gf('django.db.models.fields.CharField')(max_length=128))
def backwards(self, orm):
# Changing field 'RegisteredIndex.query_hash'
db.alter_column('djangodocument_registeredindex', 'query_hash', self.gf('django.db.models.fields.BigIntegerField')())
models = {
'djangodocument.booleanindex': {
'Meta': {'object_name': 'BooleanIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'djangodocument.dateindex': {
'Meta': {'object_name': 'DateIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.DateField', [], {'null': 'True'})
},
'djangodocument.datetimeindex': {
'Meta': {'object_name': 'DateTimeIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'djangodocument.decimalindex': {
'Meta': {'object_name': 'DecimalIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10'})
},
'djangodocument.documentstore': {
'Meta': {'object_name': 'DocumentStore'},
'collection': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'djangodocument.floatindex': {
'Meta': {'object_name': 'FloatIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'djangodocument.integerindex': {
'Meta': {'object_name': 'IntegerIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'djangodocument.longindex': {
'Meta': {'object_name': 'LongIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'})
},
'djangodocument.registeredindex': {
'Meta': {'unique_together': "[('name', 'collection')]", 'object_name': 'RegisteredIndex'},
'collection': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'query_hash': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'djangodocument.registeredindexdocument': {
'Meta': {'object_name': 'RegisteredIndexDocument'},
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'doc_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': "orm['djangodocument.RegisteredIndex']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'djangodocument.stringindex': {
'Meta': {'object_name': 'StringIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True'})
},
'djangodocument.textindex': {
'Meta': {'object_name': 'TextIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'djangodocument.timeindex': {
'Meta': {'object_name': 'TimeIndex'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djangodocument.RegisteredIndexDocument']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'value': ('django.db.models.fields.TimeField', [], {'null': 'True'})
}
}
complete_apps = ['djangodocument']
|
pkimber/kbsoftware_couk
|
settings/dev_test.py
|
Python
|
apache-2.0
| 247
| 0
|
from .local import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'temp.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
OPBEAT['APP_ID'] = None
|
Etxea/gestioneide
|
gestioneide/migrations/0020_auto_20160523_1329.py
|
Python
|
gpl-3.0
| 471
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-23 11:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gestioneide', '0019_auto_20160517_2232'),
]
operations = [
migrations.AlterField(
model_name='festivo',
name='anotacion',
field=models.CharField(default=b'', max_length=50),
),
]
|
myth/trashcan
|
it3708/project3/modules/__init__.py
|
Python
|
gpl-2.0
| 116
| 0
|
# -*- coding: utf8 -*-
#
# Created by 'myth' on 2/19/16
import matplotlib as mpl
import settings
mpl.use('TkAgg')
|
andela-bojengwa/team3
|
monitorbot_api/app/main/users.py
|
Python
|
mit
| 3,988
| 0.007773
|
import json
from flask import g, jsonify, request, current_app, url_for
from ..models import User
from .. import db
from . import main
from .authentication import auth_user
from .errors import bad_request, unauthorized, forbidden, not_found
"""read all"""
@main.route('/<token>/users/', methods=['GET'])
def get_users(token):
if not auth_user(token):
        return unauthorized("You have to be logged in to perform this action")
# get and return all:
    users = User.query.all()
list_of_dicts = [json.loads(user.to_json()) for user in users]
return json.dumps(list_of_dicts)
"""read one"""
@main.route('/<token>/users/<int:id>/', methods=['GET'])
def get_user(token, id):
if not auth_user(token):
return unauthorized("You have to be logged in to perform this action")
# get and return one with id:
user = User.query.get(id)
    if user is None:
        return not_found("Resource not found")
return user.to_json()
"""create"""
@main.route('/users/', methods=['POST']) #sign-up
def new_user():
# create and commit user
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
if email in ("", None) or password in ("", None):
return bad_request("Invalid request format!")
user = User(username=username, email=email)
user.password = password
db.session.add(user)
db.session.commit()
# get auth_token for the user:
auth_token = user.generate_auth_token(3600*24)
# create and send response
response = {}
response["user"] = user.to_json()
response["auth_token"] = auth_token
response["status"] = "success"
return jsonify(response)
"""update"""
@main.route('/<token>/users/<int:id>/', methods=['PUT'])
def update_user(token, id):
if not auth_user(token):
return unauthorized("You have to be logged in to perform this action")
user = User.query.get(id)
    if not user:
        return not_found("Resource not found!")
# create and commit user
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
if email in ("", None) or password in ("", None):
return bad_request("Invalid request format!")
user.username = username
user.email = email
user.password = password
db.session.add(user)
db.session.commit()
# create and send response
response = {}
response["user"] = user.to_json()
response["status"] = "success"
return jsonify(response)
"""delete"""
@main.route('/<token>/users/<int:id>/', methods=["DELETE"])
def delete_user(token, id):
if not auth_user(token):
return unauthorized("You have to be logged in to perform this action")
user = User.query.get(id)
    if not user:
        return not_found("Resource not found!")
# delete and commit
db.session.delete(user)
db.session.commit()
    # ! delete associated watches and checks!
#
# create and send response
response = {}
response["status"] = "success"
return json.dumps(response)
"""login"""
@main.route('/users/login/', methods=['POST'])
def login():
# get credentials
email = request.form.get('email')
password = request.form.get('password')
if email in ("", None) or password in ("", None):
return bad_request("Invalid request format!")
# check for a user with matching credentials
user = User.query.filter_by(email=email).first()
if user == None or user.verify_password(password)==False:
return bad_request("Invalid email or password!")
# set the global current_user
g.current_user = user
# get auth_token for the user
auth_token = user.generate_auth_token(3600*24) #1day
# create response
response = {}
response["user"] = user.to_json()
response["auth_token"] = auth_token
return jsonify(response)
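# --- Illustrative client-side sketch (not part of the original module) ---
# The routes above implement sign-up, login and token-authenticated CRUD. The
# snippet below shows the intended calling sequence with `requests`; the base
# URL, port and credentials are assumptions made purely for illustration.
if __name__ == '__main__':
    import requests
    BASE = 'http://localhost:5000'  # assumed host/port of the running API
    # sign up, then log in to obtain the auth token used by the tokenised routes
    requests.post(BASE + '/users/', data={'username': 'alice',
                                          'email': 'alice@example.com',
                                          'password': 'secret'})
    login_resp = requests.post(BASE + '/users/login/', data={
        'email': 'alice@example.com', 'password': 'secret'}).json()
    token = login_resp['auth_token']
    # list all users with GET /<token>/users/
    print(requests.get('{}/{}/users/'.format(BASE, token)).json())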
|
VandroiyLabs/FaroresWind
|
faroreswind/collector/ElectronicNose.py
|
Python
|
gpl-3.0
| 2,352
| 0.019133
|
import serial
import numpy as np
import json
from datetime import datetime
class ElectronicNose:
def __init__(self, devAdd='/dev/ttyUSB0', baudrate=115200/3, \
tmax = 1000, outputFile = '', numSensors = 8):
## Creating the serial object
self.Sensor = serial.Serial(devAdd, baudrate)
self.memory = np.empty((0, numSensors + 2 + 1))
## File to store samples
if outputFile != '':
self.outfile = open(outputFile, 'a')
else:
self.outfile = []
## Writing the parameters
Vparam = '54'
if False: self.Sensor.write('P000' + 8*Vparam )
return
def save(self, filename):
np.save(filename, self.memory)
return
def closeConnection(self):
self.Sensor.close()
return
def forget(self):
self.memory = np.empty( (0, self.memory.shape[1] ) )
return
def refresh(self, nmax):
self.t[:self.tMax - nmax] = self.t[nmax:]
self.S[:self.tMax - nmax,:] = self.S[nmax:,:]
return
def sniff(self, nsamples=5):
# Flushing to ensure time precision
self.Sensor.flush()
# Possibly getting partial line -- this will be discarded
self.Sensor.readline()
avg = np.zeros( (1,11) )
nsamples_ = 0
for j in range(nsamples):
r = self.Sensor.readline()
if len(r) == 44:
nsamples_ += 1
                avg[0,1:] += self.convert( r.split('\rV')[1].split('\n')[0][8:39] )
if nsamples_ > 0:
avg = avg/float(nsamples_)
now = datetime.now()
avg[0,0] = now.hour*3600 + now.minute*60 + now.second + now.microsecond/1.e6
self.memory = np.concatenate( (self.memory, np.reshape(avg, (1,11)) ), axis=0 )
return
def convert(self, string):
s = np.zeros(10)
# Converting 8 sensors
for j in range(8):
s[j] = int( string[j*3:j*3+3] , 16 )
# Converting temperature and humidity
s[8] = int( string[24:28] , 16)
s[9] = int( string[28:31] , 16)
return s
if __name__ == "__main__":
# Instantiating the class
EN = ElectronicNose()
# Acquiring some data
EN.sniff(1000)
# Closing connection
EN.closeConnection()
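# --- Illustrative sketch of the frame decoding performed by convert() above ---
# (not part of the original module; the sample payload below is made up)
# convert() expects a 31-character hex payload: eight 3-digit sensor readings
# (24 chars), then a 4-digit temperature field and a 3-digit humidity field.
def _demo_convert():
    sample = '0FA' * 8 + '01F4' + '1A2'            # 24 + 4 + 3 = 31 characters
    sensors = [int(sample[j * 3:j * 3 + 3], 16) for j in range(8)]  # eight values of 250
    temperature_raw = int(sample[24:28], 16)       # 0x01F4 = 500
    humidity_raw = int(sample[28:31], 16)          # 0x1A2  = 418
    return sensors, temperature_raw, humidity_raw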
|
1ukash/horizon
|
horizon/dashboards/project/volumes/tests.py
|
Python
|
apache-2.0
| 14,507
| 0.001103
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import widgets
from mox import IsA
from horizon import api
from horizon import test
class VolumeViewTests(test.TestCase):
@test.create_stubs({api: ('tenant_quota_usages', 'volume_create',
'volume_snapshot_list')})
def test_create_volume(self):
volume = self.volumes.first()
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50, 'snapshot_source': ''}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({api: ('tenant_quota_usages', 'volume_create',
'volume_snapshot_list'),
api.nova: ('volume_snapshot_get',)})
def test_create_volume_from_snapshot(self):
volume = self.volumes.first()
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
snapshot = self.volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50, 'snapshot_source': snapshot.id}
# first call- with url param
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=snapshot.id).\
AndReturn(volume)
# second call- with dropdown
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=snapshot.id).\
AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({api: ('tenant_quota_usages',),
api.nova: ('volume_snapshot_get',)})
def test_create_volume_from_snapshot_invalid_size(self):
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
snapshot = self.volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GB)")
@test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
def test_create_volume_gb_used_over_alloted_quota(self):
usage = {'gigabytes': {'available': 100, 'used': 20}}
formData = {'name': u'This Volume Is Huge!',
'description': u'This is a volume that is just too big!',
'method': u'CreateForm',
'size': 5000}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'A volume of 5000GB cannot be created as you only'
' have 100GB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
def test_create_volume_number_over_alloted_quota(self):
usage = {'gigabytes': {'available': 100, 'used': 20},
'volumes': {'available': 0}}
formData = {'name': u'Too Many...',
'description': u'We have no volumes left!',
'method': u'CreateForm',
'size': 10}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({api: ('volume_list',
|
ellmetha/django-machina
|
tests/_testsite/apps/forum_conversation/migrations/0010_auto_20170120_0224.py
|
Python
|
bsd-3-clause
| 644
| 0.001553
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-20 01:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forum_conversation', '0009_auto_20160925_2126'),
]
operations = [
migrations.AlterField(
            model_name='topic',
name='first_post',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='forum_conversation.Post', verbose_name='First post'),
),
]
|
PyQuake/earthquakemodels
|
code/gaModel/parallelGAModelP_AVR.py
|
Python
|
bsd-3-clause
| 6,277
| 0.035686
|
"""
This GA code creates the gaModel with a circular island model
"""
from operator import attrgetter
# import sys
from deap import base, creator, tools
import numpy
from csep.loglikelihood import calcLogLikelihood as loglikelihood
from models.mathUtil import calcNumberBins
import models.model
import random
import array
import multiprocessing
from mpi4py import MPI
import time
def evaluationFunction(individual, modelOmega, mean):
"""
This function calculates the loglikelihood of a model (individual) with
the real data from the prior X years (modelOmega, with length X).
It selects the smallest loglikelihood value.
"""
logValue = float('Infinity')
genomeModel=type(modelOmega[0])
for i in range(len(modelOmega)):
genomeModel.bins=list(individual)
modelLambda=type(modelOmega[0])
modelLambda.bins=calcNumberBins(genomeModel.bins, mean, modelOmega[i].values4poisson)
tempValue=loglikelihood(modelLambda, modelOmega[i])
if tempValue < logValue:
logValue = tempValue
return logValue,
def gaModel(NGEN,CXPB,MUTPB,modelOmega,year,region, mean, FREQ = 10, n_aval=50000):
"""
The main function. It evolves models, namely modelLamba or individual.
This applies the gaModel with a circular island model
It uses two parallel system: 1, simple, that splits the ga evolution between cores
and 2, that distributes the islands
"""
start = time.clock()
# Attribute generator
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
toolbox.register("attr_float", random.random)
toolbox.register("evaluate", evaluationFunction, modelOmega=modelOmega, mean= mean)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxOnePoint)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of three individuals
# drawn randomly from the current generation.
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
#calculating the number of individuals of the populations based on the number of executions
y=int(n_aval/NGEN)
x=n_aval - y*NGEN
n= x + y
pop = toolbox.population(n)
logbook = tools.Logbook()
logbook.header = "min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
# Evaluate the entire population
fitnesses = list(map(toolbox.evaluate, pop))#need to pass 2 model.bins. One is the real data, the other de generated model
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
#1 to NGEN
#creating comm and island model not fixed
target = 0
info = MPI.Status()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
mpi_info = MPI.Info.Create()
logbook = tools.Logbook()
logbook.header = "rank","gen","min","avg","max","std"
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring, but the last island[rank] best_pop
#Elitism
best_pop = tools.selBest(pop, 1)[0]
offspring = sorted(offspring, key=attrgetter("fitness"), reverse = True)
offspring[len(offspring)-1]=best_pop
random.shuffle(offspring)
pop[:] = offspring
#migration
if g % (FREQ-1) == 0 and g > 0:
best_inds = tools.selBest(pop, 1)[0]
data = comm.sendrecv(sendobj=best_inds,dest=dest,source=origin)
#rotation
target+=1
origin = (rank - (target+1)) % size
dest = (rank + ((target+1) + size)) % size
            pop[random.randint(0, len(pop)-1)] = data  # insert the migrant received from the neighbouring island
del best_pop
del data
#logBook
record = stats.compile(pop)
logbook.record(gen=g, **record)
# choose the best value
if rank == 0:
best_pop=tools.selBest(pop, 1)[0]
best_all_pop = list()
best_all_pop.append(best_pop)
for thread in range(size):
if (thread != 0):
# local_best = comm.recv(source=thread)
local_best = comm.recv(source=thread)
# req = comm.irecv(source=thread)
                # local_best = req.wait()
best_all_pop.append(local_best)
maximum = float('-inf')
# for value, index in zip(best_all_pop, range(len(best_all_pop))):
for local_best in best_all_pop:
local_maximum = evaluationFunction(local_best, modelOmega, mean)
if maximum < local_maximum[0]:
# theBestIndex = index
maximum = local_maximum[0]
best_pop = local_best
else:
best_pop=tools.selBest(pop, 1)[0]
comm.send(best_pop, dest=0)
end = time.clock()
generatedModel = type(modelOmega[0])
generatedModel.prob = best_pop
generatedModel.bins = calcNumberBins(best_pop, modelOmega[0].bins)
generatedModel.loglikelihood = best_pop.fitness.values
generatedModel.definitions = modelOmega[0].definitions
    generatedModel.time = end - start
generatedModel.logbook = logbook
return generatedModel
if __name__ == "__main__":
gaModel()
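# --- Illustrative sketch (assumed world size) of the circular island topology ---
# Each MPI rank sends its best individual to `dest` and receives from `origin`
# using the same modular arithmetic as gaModel(); the `target` offset grows
# after every migration, so the communication ring rotates over time.
def _demo_ring(size=4, rotations=3):
    for rank in range(size):
        for target in range(rotations):
            origin = (rank - (target + 1)) % size
            dest = (rank + ((target + 1) + size)) % size
            print('rank', rank, 'target', target, 'sends to', dest, 'receives from', origin)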
|
PierreBdR/point_tracker
|
doc/source/conf.py
|
Python
|
gpl-2.0
| 7,170
| 0.006834
|
# -*- coding: utf-8 -*-
#
# Point Tracker documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 25 14:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import os.path
from glob import glob
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join('..', '..', 'src')))
possible_jsmath_paths = [ os.path.join(os.environ['HOME'],'apps', 'network', 'jsMath*'),
os.path.join(os.environ['HOME'], 'apps', 'science', 'jsMath*'), '/usr/share/jsmath' ]
for filt in possible_jsmath_paths:
for pth in glob(filt):
if os.path.exists(os.path.join(pth, 'jsMath.js')):
jsmath_path = pth
break
else:
continue
break
else:
print >> sys.stderr, "Error, couldn't find the path for jsmath, please edit the possible_jsmath_paths variable."
sys.exit(2)
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.jsmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Point Tracker'
copyright = u'2010, Barbier de Reuille, Pierre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PointTrackerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PointTracker.tex', u'Point Tracker Documentation',
u'Barbier de Reuille, Pierre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
chris-j-tang/GLS
|
test/integration/ConstructorStart/simple.py
|
Python
|
mit
| 24
| 0.083333
|
-
def __init__(self):
-
|
odin1314/sketchy
|
sketchy/controllers/tasks.py
|
Python
|
apache-2.0
| 16,270
| 0.004856
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto
import lxml.html as LH
import lxml.html.clean as clean
import os
import re
import json
import requests
from requests.exceptions import ConnectionError
from requests import post
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from subprocess32 import PIPE
from collections import defaultdict
from sketchy import db, app, celery
from sketchy.models.capture import Capture
from sketchy.models.static import Static
from sketchy.controllers.validators import grab_domain
import subprocess32
@celery.task(name='check_url', bind=True)
def check_url(self, capture_id=0, retries=0, model='capture'):
"""
Check if a URL exists without downloading the whole file.
We only check the URL header.
"""
capture_record = Capture.query.filter(Capture.id == capture_id).first()
capture_record.job_status = 'STARTED'
# Write the number of retries to the capture record
db.session.add(capture_record)
capture_record.retry = retries
db.session.commit()
    # Only retrieve the headers of the request, and return response code
try:
response = ""
verify_ssl = app.config['SSL_HOST_VALIDATION']
response = requests.get(capture_record.url, verify=verify_ssl, allow_redirects=False, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:28.0) Gecko/20100101 Firefox/28.0"})
capture_record.url_response_code = response.status_code
if capture_record.status_only:
capture_record.job_status = 'COMPLETED'
capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
if capture_record.callback:
finisher(capture_record)
        else:
            capture_record.capture_status = '%s HTTP STATUS CODE' % (response.status_code)
# If URL doesn't return a valid status code or times out, raise an exception
except Exception as err:
capture_record.job_status = 'RETRY'
capture_record.capture_status = str(err)
capture_record.url_response_code = 0
check_url.retry(kwargs={'capture_id': capture_id, 'retries': capture_record.retry + 1}, exc=err, countdown=app.config['COOLDOWN'], max_retries=app.config['MAX_RETRIES'])
# If the code was not a good code, record the status as a 404 and raise an exception
finally:
db.session.commit()
return str(response.status_code)
def do_capture(status_code, the_record, base_url, model='capture', phantomjs_timeout=app.config['PHANTOMJS_TIMEOUT']):
"""
Create a screenshot, text scrape, from a provided html file.
This depends on phantomjs and an associated javascript file to perform the captures.
In the event an error occurs, an exception is raised and handled by the celery task
or the controller that called this method.
"""
# Make sure the the_record
db.session.add(the_record)
    # If the capture is for static content, use a different PhantomJS config file
if model == 'static':
capture_name = the_record.filename
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/static.js',
app.config['LOCAL_STORAGE_FOLDER'],
capture_name]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)
else:
capture_name = grab_domain(the_record.url) + '_' + str(the_record.id)
service_args = [
app.config['PHANTOMJS'],
'--ssl-protocol=any',
'--ignore-ssl-errors=yes',
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/assets/capture.js',
the_record.url,
os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name)]
content_to_parse = os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.html')
# Using subprocess32 backport, call phantom and if process hangs kill it
pid = subprocess32.Popen(service_args, stdout=PIPE, stderr=PIPE)
try:
stdout, stderr = pid.communicate(timeout=phantomjs_timeout)
except subprocess32.TimeoutExpired:
pid.kill()
stdout, stderr = pid.communicate()
app.logger.error('PhantomJS Capture timeout at {} seconds'.format(phantomjs_timeout))
raise subprocess32.TimeoutExpired('phantomjs capture',phantomjs_timeout)
# If the subprocess has an error, raise an exception
if stderr or stdout:
raise Exception(stderr)
# Strip tags and parse out all text
ignore_tags = ('script', 'noscript', 'style')
with open(content_to_parse, 'r') as content_file:
content = content_file.read()
cleaner = clean.Cleaner()
content = cleaner.clean_html(content)
doc = LH.fromstring(content)
output = ""
for elt in doc.iterdescendants():
if elt.tag in ignore_tags:
continue
text = elt.text or ''
tail = elt.tail or ''
wordz = " ".join((text, tail)).strip('\t')
if wordz and len(wordz) >= 2 and not re.match("^[ \t\n]*$", wordz):
output += wordz.encode('utf-8')
# Since the filename format is different for static captures, update the filename
# This will ensure the URLs are pointing to the correct resources
if model == 'static':
capture_name = capture_name.split('.')[0]
    # Write our html text that was parsed into our capture folder
parsed_text = open(os.path.join(app.config['LOCAL_STORAGE_FOLDER'], capture_name + '.txt'), 'wb')
parsed_text.write(output)
# Update the sketch record with the local URLs for the sketch, scrape, and html captures
the_record.sketch_url = base_url + '/files/' + capture_name + '.png'
the_record.scrape_url = base_url + '/files/' + capture_name + '.txt'
the_record.html_url = base_url + '/files/' + capture_name + '.html'
# Create a dict that contains what files may need to be written to S3
files_to_write = defaultdict(list)
files_to_write['sketch'] = capture_name + '.png'
files_to_write['scrape'] = capture_name + '.txt'
files_to_write['html'] = capture_name + '.html'
# If we are not writing to S3, update the capture_status that we are completed.
if not app.config['USE_S3']:
the_record.job_status = "COMPLETED"
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
else:
the_record.capture_status = "LOCAL_CAPTURES_CREATED"
db.session.commit()
return files_to_write
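# --- Illustrative sketch (assumptions noted) of the timeout handling in do_capture() ---
# PhantomJS is started through subprocess32 and killed if communicate() exceeds
# the timeout. The helper below shows the same pattern in isolation; the argument
# list it receives is whatever command you want to guard, not a value taken from
# this project.
def _run_with_timeout(args, timeout_seconds):
    proc = subprocess32.Popen(args, stdout=PIPE, stderr=PIPE)
    try:
        return proc.communicate(timeout=timeout_seconds)
    except subprocess32.TimeoutExpired:
        proc.kill()
        proc.communicate()  # reap the killed process before re-raising
        raise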
def s3_save(files_to_write, the_record):
"""
Write a sketch, scrape, and html file to S3
"""
db.session.add(the_record)
# These are the content-types for the files S3 will be serving up
reponse_types = {'sketch': 'image/png', 'scrape': 'text/plain', 'html': 'text/html'}
# Iterate through each file we need to write to s3
for capture_type, file_name in files_to_write.items():
# Connect to S3, generate Key, set path based on capture_type, write file to S3
conn = boto.s3.connect_to_region(
region_name = app.config.get('S3_BUCKET_REGION_NAME'),
calling_format = boto.s3.connection.OrdinaryCallingFormat()
)
key = Key(conn.get_bucket(app.config.get('S3_BUCKET_PREFIX')))
path = "sketchy/{}/{}".format(capture_type, the_record.id)
key.key = path
key.set_contents_from_filename(app.config['LOCAL_STORAGE_FOLDER'] + '/' + file_name)
# Generate a URL for downloading t
|
jaredhasenklein/the-blue-alliance
|
tests/suggestions/test_media_url_parse.py
|
Python
|
mit
| 9,927
| 0.004533
|
import json
import unittest2
from google.appengine.ext import testbed
from consts.media_type import MediaType
from helpers.media_helper import MediaParser
from helpers.webcast_helper import WebcastParser
class TestMediaUrlParser(unittest2.TestCase):
def setUp(cls):
cls.testbed = testbed.Testbed()
cls.testbed.activate()
cls.testbed.init_urlfetch_stub()
def tearDown(cls):
cls.testbed.deactivate()
def test_youtube_parse(self):
yt_long = MediaParser.partial_media_dict_from_url("http://www.youtube.com/watch?v=I-IrVbsl_K8")
self.assertEqual(yt_long['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_long['foreign_key'], "I-IrVbsl_K8")
yt_short = MediaParser.partial_media_dict_from_url("http://youtu.be/I-IrVbsl_K8")
self.assertEqual(yt_short['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_short['foreign_key'], "I-IrVbsl_K8")
yt_from_playlist = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=VP992UKFbko&index=1&list=PLZT9pIgNOV6ZE0EgstWeoRWGWT3uoaszm")
self.assertEqual(yt_from_playlist['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_from_playlist['foreign_key'], 'VP992UKFbko')
# def test_cdphotothread_parsetest_cdphotothread_parse(self):
# cd = MediaParser.partial_media_dict_from_url(
# "https://www.chiefdelphi.com/media/photos/41999")
# self.assertEqual(cd['media_type_enum'], MediaType.CD_PHOTO_THREAD)
# self.assertEqual(cd['foreign_key'], "41999")
# self.assertTrue(cd['details_json'])
# details = json.loads(cd['details_json'])
# self.assertEqual(details['image_partial'], "a88/a880fa0d65c6b49ddb93323bc7d2e901_l.jpg")
def test_imgur_parse(self):
imgur_img = MediaParser.partial_media_dict_from_url("http://imgur.com/zYqWbBh")
self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
imgur_img = MediaParser.partial_media_dict_from_url("http://i.imgur.com/zYqWbBh.png")
self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/r/aww"), None)
self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/a/album"), None)
def test_fb_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("http://facebook.com/theuberbots")
self.assertEqual(result['media_type_enum'], MediaType.FACEBOOK_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'theuberbots')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.FACEBOOK_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.facebook.com/theuberbots')
def test_twitter_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://twitter.com/team1124")
self.assertEqual(result['media_type_enum'], MediaType.TWITTER_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'team1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.TWITTER_PROFILE])
self.assertEqual(result['profile_url'], 'https://twitter.com/team1124')
def test_youtube_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
self.assertEqual(result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'uberbots1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(result['profile_url'], 'https://www.youtube.com/uberbots1124')
short_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
self.assertEqual(short_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(short_result['is_social'], True)
self.assertEqual(short_result['foreign_key'], 'uberbots1124')
self.assertEqual(short_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(short_result['profile_url'], 'https://www.youtube.com/uberbots1124')
gapps_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/c/tnt3102org")
self.assertEqual(gapps_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(gapps_result['is_social'], True)
self.assertEqual(gapps_result['foreign_key'], 'tnt3102org')
self.assertEqual(gapps_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(gapps_result['profile_url'], 'https://www.youtube.com/tnt3102org')
def test_github_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://github.com/frc1124")
self.assertEqual(result['media_type_enum'], MediaType.GITHUB_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'frc1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.GITHUB_PROFILE])
self.assertEqual(result['profile_url'], 'https://github.com/frc1124')
def test_instagram_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/4hteamneutrino")
self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], '4hteamneutrino')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.INSTAGRAM_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.instagram.com/4hteamneutrino')
def test_periscope_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.periscope.tv/evolution2626")
self.assertEqual(result['media_type_enum'], MediaType.PERISCOPE_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'evolution2626')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.PERISCOPE_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.periscope.tv/evolution2626')
def test_grabcad_link(self):
result = MediaParser.partial_media_dict_from_url("https://grabcad.com/library/2016-148-robowranglers-1")
self.assertEqual(result['media_type_enum'], MediaType.GRABCAD)
self.assertEqual(result['is_social'], False)
self.assertEqual(result['foreign_key'], '2016-148-robowranglers-1')
details = json.loads(result['details_json'])
self.assertEqual(details['model_name'], '2016 | 148 - Robowranglers')
self.assertEqual(details['model_description'], 'Renegade')
self.assertEqual(details['model_image'], 'https://d2t1xqejof9utc.cloudfront.net/screenshots/pics/96268d5c5e6c1b7fe8892f713813bb40/card.jpg')
self.assertEqual(details['model_created'], '2016-09-19T11:52:23Z')
def test_instagram_image(self):
result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/p/BUnZiriBYre/")
        self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_IMAGE)
self.assertEqual(result['foreign_key'], "BUnZiriBYre")
details = json.loads(result['details_json'])
self.assertEqual(details['title'], "FRC 195 @ 2017 Battlecry @ WPI")
self.assertEqual(details['author_name'], '1stroboticsrocks')
self.assertIsNotNone(details.get('thumbnail_url', None))
def test_unsupported_url_parse(self):
self.assertEqual(MediaParser.partial_media_dict_from_url("http://foo.bar"), None)
class TestWebcastUrlParser(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls.testbed = testbed.Testbed()
cls.testbed.acti
|
guillaumelenoir/WAVEPAL
|
wavepal/tapering_window.py
|
Python
|
mit
| 2,207
| 0.043045
|
import numpy as np
from scipy.special import iv
def tapering_window(time,D,mywindow):
""" tapering_window returns the window for tapering a WOSA segment.
Inputs:
- time [1-dim numpy array of floats]: times along the WOSA segment.
- D [float]: Temporal length of the WOSA segment.
- mywindow [int]: Choice of tapering window:
-> 1: Square window
-> 2: Triangular window
-> 3: sin window
-> 4: sin**2 (Hanning) window
-> 5: sin**3 window
-> 6: sin**4 window
			-> 7: Hamming window, defined as 0.54-0.46*np.cos(2.0*np.pi*time/D)
-> 8: 4-term Blackman-Harris window, with a0=0.35875 and a1=0.48829 and a2=0.14128 and a3=0.01168
-> 9: Kaiser-Bessel window, with parameter alpha=2.5
			-> 10: Gaussian window, with standard dev. sigma=D/6.0
The terminology and formulas come from:
F. Harris. On the use of windows for harmonic analysis with the discrete fourier transform. Proceedings of the IEEE, 66(1):51-83, January 1978.
WARNING: Provide the vector 'time' such that for all k=0,...,time.size-1, we have time[k]>=0 and time[k]<=D
Outputs:
- tapering_window [1-dim numpy array of floats - size=time.size]: the tapering window.
-----------------------------
This is part of WAVEPAL
(C) 2016 G. Lenoir"""
T=time.size
if mywindow==1:
tapering_window=np.ones(T)
elif mywindow==2:
tapering_window=1.0-np.absolute(time-D/2.0)/(D/2.0)
elif mywindow==3:
tapering_window=np.sin(np.pi*time/D)
elif mywindow==4:
tapering_window=(np.sin(np.pi*time/D))**2
elif mywindow==5:
tapering_window=(np.sin(np.pi*time/D))**3
elif mywindow==6:
tapering_window=(np.sin(np.pi*time/D))**4
elif mywindow==7:
tapering_window=0.54-0.46*np.cos(2.0*np.pi*time/D)
elif mywindow==8:
a0=0.35875
a1=0.48829
a2=0.14128
a3=0.01168
tapering_window=a0-a1*np.cos(2.0*np.pi*time/D)+a2*np.cos(4.0*np.pi*time/D)-a3*np.cos(6.0*np.pi*time/D)
elif mywindow==9:
alpha=2.5
tapering_window=iv(0,np.pi*alpha*np.sqrt(1.0-((time-D/2.0)/(D/2.0))**2))
elif mywindow==10:
sig=D/6.0
tapering_window=np.exp(-(time-D/2.0)**2/2.0/sig**2)
else:
print "Error: The window number you entered is not valid. Check input variable 'mywindow'."
return
return tapering_window
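# --- Illustrative usage sketch (the sample values below are assumptions) ---
# Build a Hanning (sin**2) taper for a 10-second WOSA segment; as the warning
# in the docstring says, every entry of `time` must lie inside [0, D].
if __name__ == "__main__":
	t = np.linspace(0.0, 10.0, 101)
	w = tapering_window(t, 10.0, 4)    # mywindow=4 -> sin**2 (Hanning) window
	print(w[0], w[50], w[-1])          # ~0 at both edges, 1.0 at the centre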
|
Drob-AI/music-queue-rec
|
src/playlistsRecomender/gaPlaylistGenerator/__init__.py
|
Python
|
mit
| 25
| 0.04
|
from ga_starters import *
|
Thetoxicarcade/ac
|
congredi/utils/test/test_progress.py
|
Python
|
gpl-3.0
| 391
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
progress test (count to 1000)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from ...utils.timing import TimedTestCase
from ..progress import together
class test_progress(TimedTestCase):
def test_prog(self):
self.threshold = 0.1
together(1000)
|
lmazuel/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/service_fabric_client_ap_is.py
|
Python
|
mit
| 582,969
| 0.001683
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from . import models
class ServiceFabricClientAPIsConfiguration(Configuration):
"""Configuration for ServiceFabricClientAPIs
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'http://localhost:19080'
super(ServiceFabricClientAPIsConfiguration, self).__init__(base_url)
self.add_user_agent('azure-servicefabric/{}'.format(VERSION))
self.credentials = credentials
class ServiceFabricClientAPIs(object):
"""Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services.
:ivar config: Configuration for client.
:vartype config: ServiceFabricClientAPIsConfiguration
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
self.config = ServiceFabricClientAPIsConfiguration(credentials, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '6.1.2'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
def get_cluster_manifest(
self, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Get the Service Fabric cluster manifest.
Get the Service Fabric cluster manifest. The cluster manifest contains
properties of the cluster that include different node types on the
cluster,
security configurations, fault and upgrade domain topologies etc.
These properties are specified as part of the ClusterConfig.JSON file
while deploying a stand alone cluster. However, most of the information
in the cluster manifest
is generated internally by service fabric during cluster deployment in
other deployment scenarios (for e.g when using azure portal).
The contents of the cluster manifest are for informational purposes
only and users are not expected to take a dependency on the format of
the file contents or its interpretation.
.
:param timeout: The server timeout for performing the operation in
seconds. This specifies the time duration that the client is willing
to wait for the requested operation to complete. The default value for
this parameter is 60 seconds.
:type timeout: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ClusterManifest or ClientRawResponse if raw=true
:rtype: ~azure.servicefabric.models.ClusterManifest or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`FabricErrorException<azure.servicefabric.models.FabricErrorException>`
"""
api_version = "6.0"
# Construct URL
url = '/$/GetClusterManifest'
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.FabricErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClusterManifest', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_cluster_health(
self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config):
"""Gets the health of a Service Fabric cluster.
Gets the health of a Service Fabric cluster.
Use EventsHealthStateFilter to filter the collection of health events
reported on the cluster based on the health state.
Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter
to filter the collection of nodes and applications returned based on
their aggregated health state.
.
:param nodes_health_state_filter: Allows filtering of the node health
state objects returned in the result of cluster health query
based on their health state. The possible values for this parameter
include integer value of one of the
following health states. Only nodes that match the filter are
returned. All nodes are used to evaluate the aggregated health state.
If not specified, all entries are returned.
The state values are flag based enumeration, so the value could be a
combination of these values obtained using bitwise 'OR' operator.
For example, if the provided value is 6 then health state of nodes
with HealthState value of OK (2) and Warning (4) are returned.
- Default - Default value. Matches any HealthState. The value is zero.
- None - Filter that doesn't match any HealthState value. Used in
order to return no results on a given collection of states. The value
is 1.
- Ok - Filter that matches input with HealthState value Ok. The value
is 2.
- Warning - Filter that matches input with HealthState value Warning.
The value is 4.
- Error - Filter that matches input with HealthState value Error. The
value is 8.
- All - Filter that matches input with any HealthState value. The
value is 65535.
:type nodes_health_state_filter: int
:param applications_health_state_filter: Allows filtering of the
application health state objects returned in the result of cluster
health
query based on their health state.
        The possible values for this parameter include integer value obtained
from members or bitwise operations
on members of HealthStateFilter enumeration. Only applications that
match the filter are returned.
All applications are used to evaluate th
|
ThomasTheSpaceFox/Desutezeoid
|
plugins/test2.dzup.py
|
Python
|
gpl-3.0
| 1,889
| 0.044997
|
#!/usr/bin/env python
import pygame  # pygame.Rect / pygame.draw are used below; the host loader may also provide this
class PLUGIN_test_test2:
	def __init__(self, screensurf, keylist, vartree):
self.screensurf=screensurf
self.keylist=keylist
#best practice to init keyid variables during init, and default them to "0" (the null keyid)
self.keyid="0"
def fork(self, tagobj):
return
#core object. should either return None, or pygame Rect.
#if Rect is returned, the system will attempt to parse the standard
	#"act" component, and associated related attributes...
#you may also want to use the provided click events in place of the standard act component.
#if you want hoverkey to be active, you MUST return a Rect!
#onkey/offkey masking is honored by the system regardless.
def core(self, tagobj):
if tagobj.tag=="test2":
self.xpos=int(tagobj.attrib.get("x"))
self.ypos=int(tagobj.attrib.get("y"))
#note: these core object tests are in blue
self.testrect=pygame.Rect(self.xpos, self.ypos, 60, 20)
pygame.draw.rect(self.screensurf, (0, 127, 255), self.testrect)
return self.testrect
#called every loop.
def pump(self):
return
#called on pygame mousebuttondown events
def click(self, event):
return
#called on pygame mousebuttonup events
def clickup(self, event):
return
#called upon page load
def pageclear(self):
return
#pause & resume can be useful for various things. such as properly extending timers. for that, its reccomended using the calculated seconds.
def pause(self, time):
print("plugin test2.dzup.py receved pause call.")
print(time)
#seconds refers to the calculated number of seconds paused, as a float.
def resume(self, seconds):
print("plugin test2.dzup.py receved resume call.")
print(seconds)
def keyup(self, event):
print("plugin test2.dzup.py receved KEYUP")
def keydown(self, event):
print("plugin test2.dzup.py receved KEYDOWN")
plugname="test plugin2"
plugclass=PLUGIN_test_test2
plugpath=None
|
taogeT/flask-celery
|
example/celery_run.py
|
Python
|
bsd-2-clause
| 101
| 0
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from app import create_app, celery
app = create_app()
|
join2saurav/Lexical-syntax-semantic-analysis-of-Hindi-text-
|
test10.py
|
Python
|
apache-2.0
| 256
| 0.019531
|
import collections
g=open("depth_29.txt","w")
with open('depth_28.txt') as infile:
counts = collections.Counter(l.strip() for l in infile)
|
for line, count in counts.most_common():
g.write(str(line))
|
#g.write(str(count))
g.write("\n")
|
thebjorn/dkcoverage
|
setup.py
|
Python
|
gpl-2.0
| 285
| 0
|
from distutils.core import setup
setup(
name='dkcoverage',
version='0.0.0',
packages=[''],
url='https://github.com/thebjorn/dkcoverage',
license='GPL v2',
author='bjorn',
author_email='bp@datakortet.no',
description='Run tests and compute coverage.'
)
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_shear_mite_broodling.py
|
Python
|
mit
| 444
| 0.047297
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_shear_mite_broodling.iff"
result.attribute_template_id = 9
result.stfName("monster_name","shear_mite")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
robertbreker/sm
|
drivers/devscan.py
|
Python
|
lgpl-2.1
| 14,406
| 0.00833
|
#!/usr/bin/env python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys, os, re
import scsiutil, util
import xml.dom.minidom
import xs_errors, time
import glob
DEVPATH='/dev/disk/by-id'
DMDEVPATH='/dev/mapper'
SYSFS_PATH1='/sys/class/scsi_host'
SYSFS_PATH2='/sys/class/scsi_disk'
SYSFS_PATH3='/sys/class/fc_transport'
DRIVER_BLACKLIST = ['^(s|p|)ata_.*', '^ahci$', '^pdc_adma$', '^iscsi_tcp$']
INVALID_DEVICE_NAME = ''
def getManufacturer(s):
(rc,stdout,stderr) = util.doexec(['/sbin/modinfo', '-d', s])
if stdout:
return stdout.strip()
else:
return "Unknown"
def update_devs_dict(devs, dev, entry):
if dev != INVALID_DEVICE_NAME:
devs[dev] = entry
def adapters(filterstr="any"):
dict = {}
devs = {}
adt = {}
for a in os.listdir(SYSFS_PATH1):
proc = match_hbadevs(a, filterstr)
if not proc:
continue
adt[a] = proc
id = a.replace("host","")
scsiutil.rescan([id])
emulex = False
paths = []
if proc == "lpfc":
emulex = True
paths.append(SYSFS_PATH3)
else:
for p in [os.path.join(SYSFS_PATH1,a,"device","session*"),os.path.join(SYSFS_PATH1,a,"device"),\
os.path.join(SYSFS_PATH2,"%s:*"%id)]:
paths += glob.glob(p)
if not len(paths):
continue
for path in paths:
for i in filter(match_targets,os.listdir(path)):
tgt = i.replace('target','')
if emulex:
sysfs = os.path.join(SYSFS_PATH3,i,"device")
else:
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
if emulex:
dir = os.path.join(sysfs,lun)
else:
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
# for new qlogic sysfs layout (rport under device, then target)
for i in filter(match_rport,os.listdir(path)):
newpath = os.path.join(path, i)
for j in filter(match_targets,os.listdir(newpath)):
tgt = j.replace('target','')
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
# for new mptsas sysfs entries, check for phy* node
for i in filter(match_phy,os.listdir(path)):
(target,lunid) = i.replace('phy-','').split(':')
tgt = "%s:0:0:%s" % (target,lunid)
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
if path.startswith(SYSFS_PATH2):
os.path.join(path,"device","block:*")
dev = _extract_dev_name(os.path.join(path, 'device'))
if devs.has_key(dev):
continue
hbtl = os.path.basename(path)
(h,b,t,l) = hbtl.split(':')
entry = {'procname':proc, 'host':id, 'target':l}
update_devs_dict(devs, dev, entry)
dict['devs'] = devs
dict['adt'] = adt
return dict
def _get_driver_name(scsihost):
driver_name = 'Unknown'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'fnic_state')):
driver_name = 'fnic'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'lpfc_fcp_class')):
driver_name = 'lpfc'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, '84xx_fw_version')):
driver_name = 'qla2xxx'
if 'Unknown' == driver_name:
namepath = os.path.join(SYSFS_PATH1, scsihost, 'driver_name')
if not os.path.exists(namepath):
namepath = os.path.join(SYSFS_PATH1, scsihost, 'proc_name')
if os.path.exists(namepath):
try:
f = open(namepath, 'r')
line = f.readline()[:-1]
f.close()
if not line in ['<NULL>', '(NULL)', '']:
driver_name = line
except IOError:
pass
if 'Unknown' == driver_name:
ueventpath = os.path.join(SYSFS_PATH1, scsihost, 'uevent')
if os.path.exists(ueventpath):
try:
f = open(ueventpath, 'r')
for line in f:
if line.startswith('PHYSDEVDRIVER='):
driver_name = line.replace('PHYSDEVDRIVER=','').strip()
f.close()
except IOError:
pass
return driver_name
def _parseHostId(str):
id = str.split()
val = "%s:%s:%s" % (id[1],id[3],id[5])
return val.replace(',','')
def _genMPPHBA(id):
devs = scsiutil.cacheSCSIidentifiers()
mppdict = {}
for dev in devs:
item = devs[dev]
if item[1] == id:
arr = scsiutil._genArrayIdentifier(dev)
if not len(arr):
continue
try:
cmd = ['/usr/sbin/mppUtil', '-a']
|
for line in util.doexec(cmd)[1].split('\n'):
if line.find(arr) != -1:
rec = line.split()[0]
cmd2 = ['/usr/sbin/mppUtil', '-g',rec]
li = []
for newline in util.doexec(cmd2)[1].split('\n'):
if newline.find('hostId') != -1:
li.append(_parseHostId(newline))
mppdict[dev.split('/')[-1]] = li
except:
continue
return mppdict
def match_hbadevs(s, filterstr):
driver_name = _get_driver_name(s)
if match_host(s) and not match_blacklist(driver_name) \
and ( filterstr == "any" or match_filterstr(filterstr, driver_name) ):
return driver_name
else:
return ""
def match_blacklist(driver_name):
return re.search("(" + ")|(".join(DRIVER_BLACKLIST) + ")", driver_name)
def match_filterstr(filterstr, driver_name):
return re.search("^%s" % filterstr, driver_name)
def match_host(s):
return re.search("^host[0-9]", s)
def match_rport(s):
regex = re.compile("^rport-*")
return regex.search(s, 0)
def match_targets(s):
regex = re.compile("^target[0-9]")
return regex.search(s, 0)
def match_phy(s):
regex = re.compile("^phy-*")
return regex.search(s, 0)
def match_LUNs(s, prefix):
regex = re.compile("^%s" % prefix)
return regex.search(s, 0)
def match_dev(s):
regex = re.compile("^block:")
return regex.search(s, 0)
def _extract_dev_name(device_dir):
"""Returns the name of the block device from sysfs e.g. 'sda'"""
kernel_version = os.uname()[2]
if kernel_version.startswith('2.6'):
# sub-directory of form block:sdx/
dev = filter(match_dev, os.listdir(device
|
welch/rasl
|
tests/inner_test.py
|
Python
|
mit
| 4,843
| 0.00351
|
# test rasl inner loop on simulated data
#
# pylint:disable=import-error
from __future__ import division, print_function
import numpy as np
from rasl.inner import inner_ialm
from rasl import (warp_image_gradient, EuclideanTransform,
SimilarityTransform, AffineTransform, ProjectiveTransform)
def setup_function(_):
np.random.seed(0)
np.set_printoptions(threshold=np.inf,
formatter={'float_kind':lambda x: "%.3f" % x})
def gauss_image(h=60, v=60):
"""a gaussian image as described in RASL and RPCA papers"""
return np.random.normal(0, 1.0, (h, v))
def image_noise(likeimg, p=0.1):
"""sparse noise as described in RASL and RPCA papers"""
sgn = np.random.choice((-1.0, 1.0), size=likeimg.shape)
return sgn * np.random.binomial(1, p, size=likeimg.shape)
def inner_aligned(Ttype, inset=10):
"""don't mess (much) with a stack of aligned images"""
N = 40
image0 = gauss_image()
insetT = Ttype().inset(image0.shape, inset)
Image = [image0 for _ in range(N)]
TI, J = zip(*[warp_image_gradient(insetT, image, normalize=True)
for image in Image])
_, _, dParamv = inner_ialm(TI, J, tol=1e-4)
# for this test, verify that all images have same dParamv
# (inner insists on stepping dParamv a small amount when all images
# are aligned, so image comparisons are no good)
assert np.allclose(dParamv, dParamv[0], atol=1e-3)
def test_inner_aligned_similarity():
inner_aligned(SimilarityTransform)
def test_inner_aligned_euclidean():
inner_aligned(EuclideanTransform)
def test_inner_aligned_affine():
inner_aligned(AffineTransform)
def test_inner_aligned_projective():
inner_aligned(ProjectiveTransform)
def inner_jittered(T, inset=10, rtol=1e-3, atol=0):
"""move a stack of jittered noisy images in the direction of aligned"""
image0 = gauss_image()
Image = [image0 + image_noise(image0, p=.05) for _ in T]
T = [tform.inset(image0.shape, inset) for tform in T]
TImage, J = zip(*[warp_image_gradient(tform, image, normalize=True)
for tform, image in zip(T, Image)])
_, _, dParamv = inner_ialm(TImage, J, tol=1e-4)
# does dParamv move towards alignment? check if stdev of
# parameters decreased.
before = np.array([t.paramv for t in T])
beforeStd = np.std(before, 0)
after = np.array([t.paramv + dparamv
for t, dparamv in zip(T, dParamv)])
afterStd = np.std(after, 0)
assert np.all(np.logical_or(afterStd < beforeStd,
np.isclose(after, before, rtol=rtol, atol=atol)))
def test_inner_jittered_euclidean():
N = 40
dtheta, dx, dy= .05, 1, 1
Jitters = [[(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dy]
for _ in range(N)]
inner_jittered([EuclideanTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_similarity():
N = 40
ds, dtheta, dx, dy= .05, .05, 1, 1
Jitters = [[(np.random.random() * 2 - 1) * ds + 1,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dy]
for _ in range(N)]
inner_jittered([SimilarityTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_affine():
N = 40
ds, dtheta, dx = .05, .05, 1
Jitters = [[(np.random.random() * 2 - 1) * ds + 1.0,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * ds + 1.0,
(np.random.random() * 2 - 1) * dx]
for _ in range(N)]
inner_jittered([AffineTransform(paramv=jitter) for jitter in Jitters])
def test_inner_jittered_projective():
# projective is a pain to test this way. the two projective
# parameters are badly conditioned and change too much in a single
# step. for now, set tolerance to disregard a wobbly step in the
# final two parameters, while assuring we converge the others.
N = 40
ds, dtheta, dx, dh = .05, .05, 1, 0.0005
Jitters = [[(np.random.random() * 2 - 1) * ds + 1,
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * dx,
|
(np.random.random() * 2 - 1) * dtheta,
(np.random.random() * 2 - 1) * ds + 1,
(np.random.random() * 2 - 1) * dx,
(np.random.random() * 2 - 1) * dh,
(np.random.random() * 2 - 1) * dh]
for _ in range(N)]
inner_jittered([ProjectiveTransform(paramv=jitter) for jitter in Jitters],
atol=.001)
|
cigroup-ol/metaopt
|
metaopt/objective/integer/failing/__init__.py
|
Python
|
bsd-3-clause
| 209
| 0
|
# -*- coding: utf-8 -*-
"""
Package of failing integer functions.
"""
from metaopt.objective.integer.failing.f import f as f
from metaopt.objective.integer.failing.g import f as g
FUNCTIONS_FAILING = [f, g]
|
coreycb/horizon
|
openstack_dashboard/test/integration_tests/tests/test_floatingips.py
|
Python
|
apache-2.0
| 4,543
| 0
|
# Copyright 2015 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestFloatingip(helpers.TestCase):
"""Checks that the user is able to allocate/release floatingip."""
def test_floatingip(self):
floatingip_page = \
self.home_pg.go_to_compute_accessandsecurity_floatingipspage()
floating_ip = floatingip_page.allocate_floatingip()
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(floatingip_page.is_floatingip_present(floating_ip))
floatingip_page.release_floatingip(floating_ip)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(floatingip_page.is_floatingip_present(floating_ip))
class TestFloatingipAssociateDisassociate(helpers.TestCase):
"""Checks that the user is able to Associate/Disassociate floatingip."""
def test_floatingip_associate_disassociate(self):
instance_name = helpers.gen_random_resource_name('instance',
timestamp=False)
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.create_instance(instance_name)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_active(instance_name))
instance_ipv4 = instances_page.get_fixed_ipv4(instance_name)
instance_info = "{} {}".format(instance_name, instance_ipv4)
floatingip_page = \
self.home_pg.go_to_compute_accessandsecurity_floatingipspage()
floating_ip = floatingip_page.allocate_floatingip()
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(floatingip_page.is_floatingip_present(floating_ip))
self.assertEqual('-', floatingip_page.get_fixed_ip(floating_ip))
floatingip_page.associate_floatingip(floating_ip, instance_name,
instance_ipv4)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertEqual(instance_info,
floatingip_page.get_fixed_ip(floating_ip))
floatingip_page.disassociate_floatingip(floating_ip)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertEqual('-', floatingip_page.get_fixed_ip(floating_ip))
floatingip_page.release_floatingip(floating_ip)
self.assertTrue(
floatingip_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
floatingip_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(floatingip_page.is_floatingip_present(floating_ip))
instances_page = self.home_pg.go_to_compute_instancespage()
instances_page.delete_instance(instance_name)
self.assertTrue(
instances_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_deleted(instance_name))
|
uw-it-aca/myuw
|
myuw/views/page.py
|
Python
|
apache-2.0
| 6,010
| 0
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.contrib.auth import logout as django_logout
from restclients_core.exceptions import DataFailureException
|
from myuw.dao import is_action_disabled
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.emaillink import get_service_url_for_address
from myuw.dao.exceptions import (
EmailServiceUrlException, BlockedNetidErr)
from myuw.dao.gws import in_myuw_test_access_group
from myuw.dao.quicklinks import get_quicklink_data
from myuw.dao.card_display_dates import get_card_visibilty_date_values
from myuw.dao.messages import get_current_messages
from myuw.dao.term import add_term_data_to_context
from myuw.dao.user import get_updated_user, not_existing_user
from myuw.dao.user_pref import get_migration_preference
from myuw.dao.uwnetid import get_email_forwarding_for_current_user
from myuw.logger.timer import Timer
from myuw.logger.logresp import (
log_invalid_netid_response, log_page_view, log_exception)
from myuw.logger.session_log import (
log_session, is_native, log_session_end)
from myuw.util.settings import (
get_google_search_key, get_google_analytics_key, get_django_debug,
get_logout_url, no_access_check)
from myuw.views import prefetch_resources, get_enabled_features
from myuw.views.error import (
unknown_uwnetid, no_access, blocked_uwnetid, pws_error_404)
from django.contrib.auth.decorators import login_required
logger = logging.getLogger(__name__)
def page(request,
template,
context=None,
prefetch=True,
add_quicklink_context=False):
if context is None:
context = {}
timer = Timer()
try:
user = get_updated_user(request)
except DataFailureException as ex:
log_exception(logger, "PWS error", traceback)
if ex.status == 404:
if not_existing_user(request):
return unknown_uwnetid()
return pws_error_404()
return render(request, '500.html', status=500)
try:
if not can_access_myuw(request):
return no_access()
except DataFailureException:
log_exception(logger, "GWS error", traceback)
return render(request, '500.html', status=500)
netid = user.uwnetid
context["user"] = {
"netid": netid,
"isHybrid": is_native(request),
}
if prefetch:
# Some pages need to prefetch before this point
failure = try_prefetch(request, template, context)
if failure:
return failure
try:
affiliations = get_all_affiliations(request)
except BlockedNetidErr:
django_logout(request)
return blocked_uwnetid()
except DataFailureException as err:
log_exception(logger, err, traceback)
return render(request, '500.html', status=500)
user_pref = get_migration_preference(request)
log_session(request)
context["user"]["session_key"] = request.session.session_key
context["home_url"] = "/"
context["err"] = None
context["user"]["affiliations"] = affiliations
banner_messages = []
for message in get_current_messages(request):
banner_messages.append(message.message_body)
context["banner_messages"] = banner_messages
context["display_onboard_message"] = user_pref.display_onboard_message
context["display_pop_up"] = user_pref.display_pop_up
context["disable_actions"] = is_action_disabled()
_add_email_forwarding(request, context)
try:
context["card_display_dates"] = get_card_visibilty_date_values(request)
add_term_data_to_context(request, context)
except DataFailureException:
log_exception(logger, "SWS term data error", traceback)
context['enabled_features'] = get_enabled_features()
context['google_search_key'] = get_google_search_key()
context['google_analytics_key'] = get_google_analytics_key()
context['google_tracking_enabled'] = not get_django_debug()
if add_quicklink_context:
_add_quicklink_context(request, context)
log_page_view(timer, request, template)
return render(request, template, context)
def try_prefetch(request, template, context):
try:
prefetch_resources(request,
prefetch_migration_preference=True,
prefetch_enrollment=True,
prefetch_group=True,
prefetch_instructor=True,
prefetch_sws_person=True)
except DataFailureException:
log_exception(logger, "prefetch error", traceback)
context["webservice_outage"] = True
return render(request, template, context)
return
@login_required
def logout(request):
log_session_end(request)
django_logout(request) # clear the session data
if is_native(request):
return HttpResponse()
# Redirects to authN service logout page
return HttpResponseRedirect(get_logout_url())
def _add_quicklink_context(request, context):
link_data = get_quicklink_data(request)
for key in link_data:
context[key] = link_data[key]
def can_access_myuw(request):
return (no_access_check() or in_myuw_test_access_group(request))
def _add_email_forwarding(request, context):
my_uwemail_forwarding = get_email_forwarding_for_current_user(request)
c_user = context["user"]
if my_uwemail_forwarding and my_uwemail_forwarding.is_active():
try:
c_user['email_forward_url'] = get_service_url_for_address(
my_uwemail_forwarding.fwd)
return
except EmailServiceUrlException:
logger.error('No email url for {}'.format(
my_uwemail_forwarding.fwd))
return # MUWM-4700
c_user['email_forward_url'] = None
c_user['email_error'] = True
|
rusenask/stubo-app
|
stubo/static/cmds/tests/ext/auto_mangle/skip_xml/ignore.py
|
Python
|
gpl-3.0
| 867
| 0.00692
|
import logging
from stubo.ext.xmlutils import XPathValue
from stubo.ext.xmlexit import XMLManglerExit
log = logging.getLogger(__name__)
elements = dict(year=XPathValue('//dispatchTime/dateTime/year'),
month=XPathValue('//dispatchTime/dateTime/month'),
day=XPathValue('//dispatchTime/dateTime/day'),
hour=XPathValue('//dispatchTime/dateTime/hour'),
minutes=XPathValue('//dispatchTime/dateTime/minutes'),
seconds=XPathValue('//dispatchTime/dateTime/seconds'))
attrs = dict(y=XPathValue('//dispatchTime/date/@year'),
m=XPathValue('//dispatchTime/date/@month'),
d=XPathValue('//dispatchTime/date/@day'))
ignore = XMLManglerExit(elements=elements, attrs=attrs)
def exits(request, context):
return ignore.get_exit(request, context)
|
GeotrekCE/Geotrek-admin
|
geotrek/core/migrations/0001_initial.py
|
Python
|
bsd-2-clause
| 14,022
| 0.004921
|
from django.conf import settings
from django.db import migrations, models
import mapentity.models
import django.contrib.gis.db.models.fields
import django.db.models.deletion
import geotrek.common.mixins
import geotrek.authent.models
class Migration(migrations.Migration):
dependencies = [
('authent', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comfort',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('comfort', models.CharField(max_length=50, verbose_name='Comfort', db_column='confort')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['comfort'],
'db_table': 'l_b_confort',
'verbose_name': 'Comfort',
'verbose_name_plural': 'Comforts',
},
),
migrations.CreateModel(
name='Network',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('network', models.CharField(max_length=50, verbose_name='Network', db_column='reseau')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['network'],
'db_table': 'l_b_reseau',
'verbose_name': 'Network',
'verbose_name_plural': 'Networks',
},
),
migrations.CreateModel(
name='Path',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_insert', models.DateTimeField(auto_now_add=True, verbose_name='Insertion date', db_column='date_insert')),
('date_update', models.DateTimeField(auto_now=True, verbose_name='Update date', db_column='date_update')),
('geom_3d', django.contrib.gis.db.models.fields.GeometryField(dim=3, default=None, editable=False, srid=settings.SRID, null=True, spatial_index=False)),
('length', models.FloatField(db_column='longueur', default=0.0, editable=False, blank=True, null=True, verbose_name='3D Length')),
('ascent', models.IntegerField(db_column='denivelee_positive', default=0, editable=False, blank=True, null=True, verbose_name='Ascent')),
('descent', models.IntegerField(db_column='denivelee_negative', default=0, editable=False, blank=True, null=True, verbose_name='Descent')),
('min_elevation', models.IntegerField(db_column='altitude_minimum', default=0, editable=False, blank=True, null=True, verbose_name='Minimum elevation')),
('max_elevation', models.IntegerField(db_column='altitude_maximum', default=0, editable=False, blank=True, null=True, verbose_name='Maximum elevation')),
('slope', models.FloatField(db_column='pente', default=0.0, editable=False, blank=True, null=True, verbose_name='Slope')),
('geom', django.contrib.gis.db.models.fields.LineStringField(srid=settings.SRID, spatial_index=False)),
('geom_cadastre', django.contrib.gis.db.models.fields.LineStringField(srid=settings.SRID, spatial_index=False, null=True, editable=False)),
('valid', models.BooleanField(default=True, help_text='Approved by manager', verbose_name='Validity', db_column='valide')),
('visible', models.BooleanField(default=True, help_text='Shown in lists and maps', verbose_name='Visible', db_column='visible')),
('name', models.CharField(db_column='nom', max_length=20, blank=True, help_text='Official name', null=True, verbose_name='Name')),
('comments', models.TextField(help_text='Remarks', null=True, verbose_name='Comments', db_column='remarques', blank=True)),
('departure', models.CharField(db_column='depart', default='', max_length=250, blank=True, help_text='Departure place', null=True, verbose_name='Departure')),
('arrival', models.CharField(db_column='arrivee', default='', max_length=250, blank=True, help_text='Arrival place', null=True, verbose_name='Arrival')),
('eid', models.CharField(max_length=128, null=True, verbose_name='External id', db_column='id_externe', blank=True)),
('comfort', models.ForeignKey(related_name='paths', on_delete=django.db.models.deletion.CASCADE, db_column='confort', blank=True, to='core.Comfort', null=True, verbose_name='Comfort')),
('networks', models.ManyToManyField(related_name='paths', db_table='l_r_troncon_reseau', verbose_name='Networks', to='core.Network', blank=True)),
],
options={
'db_table': 'l_t_troncon',
'verbose_name': 'Path',
'verbose_name_plural': 'Paths',
},
bases=(geotrek.common.mixins.AddPropertyMixin, mapentity.models.MapEntityMixin, models.Model),
),
migrations.CreateModel(
name='PathAggregation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_position', models.FloatField(verbose_name='Start position', db_column='pk_debut', db_index=True)),
('end_position', models.FloatField(verbose_name='End position', db_column='pk_fin', db_index=True)),
('order', models.IntegerField(default=0, null=True, verbose_name='Order', db_column='ordre', blank=True)),
('path', models.ForeignKey(related_name='aggregations', on_delete=django.db.models.deletion.DO_NOTHING, db_column='troncon', verbose_name='Path', to='core.Path')),
],
options={
'ordering': ['order'],
'db_table': 'e_r_evenement_troncon',
'verbose_name': 'Path aggregation',
'verbose_name_plural': 'Path aggregations',
},
),
migrations.CreateModel(
name='PathSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('source', models.CharField(max_length=50, verbose_name='Source')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
|
options={
'ordering': ['source'],
'db_table': 'l_b_source_troncon',
'verbose_name': 'Path source',
'verbose_name_plural': 'Path sources',
},
),
migrations.CreateModel(
name='Stake',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('stake', models.CharField(max_length=50, verbose_name='Stake', db_column='enjeu')),
('structure', models.ForeignKey(db_column='structure', on_delete=django.db.models.deletion.CASCADE, default=geotrek.authent.models.default_structure_pk, verbose_name='Related structure', to='authent.Structure')),
],
options={
'ordering': ['id'],
'db_table': 'l_b_enjeu',
'verbose_name': 'Maintenance stake',
'verbose_name_plural': 'Maintenance stakes',
},
),
migrations.CreateModel(
name='Topology',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=Tr
|
motealle/python
|
01.py
|
Python
|
gpl-2.0
| 53
| 0
|
a = 'sdlbapm'
b = 'alam'
for d in a:
print d + b
|
dario-chiappetta/Due
|
due/models/test_tfidf.py
|
Python
|
gpl-3.0
| 3,631
| 0.024787
|
import unittest
from datetime import datetime
import tempfile
import os
from due.agent import Agent
from due.episode import Episode
from due.event import Event
from due.persistence import serialize, deserialize
from due.models.tfidf import TfIdfAgent
from due.models.dummy import DummyAgent
class TestTfIdfAgent(unittest.TestCase):
def test_save_load(self):
agent = TfIdfAgent()
agent.learn_episodes(_get_train_episodes())
saved_agent = agent.save()
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, 'serialized_tfidf_agent.due')
serialize(saved_agent, path)
loaded_agent = Agent.load(deserialize(path))
assert agent.parameters == loaded_agent.parameters
assert agent._normalized_past_utterances == loaded_agent._normalized_past_utterances
assert [e.save() for e in loaded_agent._past_episodes] == [e.save() for e in agent._past_episodes]
expected_utterance = agent._process_utterance('aaa bbb ccc mario')
loaded_utterance = loaded_agent._process_utterance('aaa bbb ccc mario')
assert (agent._vectorizer.transform([expected_utterance]) != loaded_agent._vectorizer.transform([loaded_utterance])).nnz == 0
assert (agent._vectorized_past_utterances != loaded_agent._vectorized_past_utterances).nnz == 0
assert agent.utterance_callback(_get_test_episode())[0].payload, loaded_agent.utterance_callback(_get_test_episode())[0].payload
def test_utterance_callback(self):
agent = TfIdfAgent()
agent.learn_episodes(_get_train_episodes())
result = agent.utterance_callback(_get_test_episode())
self.assertEqual(result[0].payload, 'bbb')
def test_tfidf_agent(self):
cb = TfIdfAgent()
# Learn sample episode
sample_episode, alice, bob = _sample_episode()
cb.learn_episodes([sample_episode])
# Predict answer
e2 = alice.start_episode(bob)
alice.say("Hi!", e2)
answer_events = cb.utterance_callback(e2)
self.assertEqual(len(answer_events), 1)
self.assertEqual(answer_events[0].payload, 'Hello')
def test_agent_load(self):
sample_episode, alice, bob = _sample_episode()
cb = TfIdfAgent()
cb.learn_episodes([sample_episode])
test_dir = tempfile.mkdtemp()
test_path = os.path.join(test_dir, 'test_agent_load.pkl')
serialize(cb.save(), test_path)
loaded_cb = Agent.load(deserialize(test_path))
self.assertIsInstance(loaded_cb, TfIdfAgent)
e2 = alice.start_episode(bob)
alice.say("Hi!", e2)
answer_events = loaded_cb.utterance_callback(e2)
self.assertEqual(len(answer_events), 1)
self.assertEqual(answer_events[0].payload, 'Hello')
def _get_train_episodes():
result = []
e = Episode('a', 'b')
e.events = [
Event(Event.Type.Utterance, datetime.now(), 'a', 'aaa'),
Event(Event.Type.Utterance, datetime.now(), 'b', 'bbb'),
Event(Event.Type.Utterance, datetime.now(), 'a', 'ccc'),
Event(Event.Type.Utterance, datetime.now(), 'b', 'ddd')
]
result.append(e)
e = Episode('1', '2')
e.events = [
Event(Event.Type.Utterance, datetime.now(), '1', '111'),
Event(Event.Type.Utterance, datetime.now(), '2', '222'),
Event(Event.Type.Utterance, datetime.now(), '1', '333'),
Event(Event.Type.Utterance, datetime.now(), '2', '444')
]
result.append(e)
return result
def _get_test_episode():
e = Episode('a', 'b')
e.events = [
Event(Event.Type.Utterance, datetime.now(), 'a', 'aaa'),
]
return e
def _sample_episode():
alice = DummyAgent('alice')
bob = DummyAgent('bob')
result = alice.start_episode(bob)
alice.say("Hi!", result)
bob.say("Hello", result)
alice.say("How are you?", result)
bob.say("Good thanks, and you?", result)
alice.say("All good", result)
return result, alice, bob
|
florence-nocca/spanish-elections
|
retrieve-accounts/searx.py
|
Python
|
mit
| 4,109
| 0.004138
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unicodedata
from urlparse import urlparse
from threading import Thread
import httplib, sys
from Queue import Queue
import itertools
import codecs
import csv
import sys
import ssl
import re
if len(sys.argv) < 3:
print "Usage: %s <csv database> <out csv>" % (sys.argv[0])
exit()
# Unicode CSV reader
# http://stackoverflow.com/a/6187936
class UnicodeCsvReader(object):
def __init__(self, f, encoding="utf-8", **kwargs):
self.csv_reader = csv.reader(f, **kwargs)
self.encoding = encoding
def __iter__(self):
return self
def next(self):
# read and split the csv row into fields
row = self.csv_reader.next()
# now decode
return [unicode(cell, self.encoding) for cell in row]
@property
def line_num(self):
return self.csv_reader.line_num
class UnicodeDictReader(csv.DictReader):
def __init__(self, f, encoding="utf-8", fieldnames=None, **kwds):
csv.DictReader.__init__(self, f, fieldnames=fieldnames, **kwds)
self.reader = UnicodeCsvReader(f, encoding=encoding, **kwds)
# Remove particles and parentheses in names
def cleanNames(names):
filtered_names = []
for word in names:
if len(word) and word[0].lower() != word[0]:
filtered_names.append(word)
return filtered_names
# Strips accents from a unicode string
def stripAccents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
# Generates all combinations of 2 or more elements from the given array
def allCombinations(tab):
out = []
for n in range(2, len(tab) + 1):
for comb in itertools.combinations(tab, n):
out.append(" ".join(comb))
return out
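# Illustrative example (not in the original source):
# allCombinations(['a', 'b', 'c']) -> ['a b', 'a c', 'b c', 'a b c']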
# Cycles through available urls and returns the next one in the list
def getNextBaseURL():
out = getNextBaseURL.urllist[getNextBaseURL.counter % len(getNextBaseURL.urllist)]
getNextBaseURL.counter += 1
return out
getNextBaseURL.counter = 0
getNextBaseURL.urllist = [l.strip() for l in open("urls.txt", "r")]
def fetchHandles(ourl, handles):
try:
url = urlparse(ourl)
conn = httplib.HTTPSConnection(url.netloc, context=ssl._create_unverified_context())
conn.request("GET", ourl)
res = conn.getresponse()
if res.status != 200:
print res.reason, ourl
return
for line in csv.reader((l for l in res.read().split("\n")[1:])):
if len(line) < 2:
continue
match = re.match('https?://twitter.com/(\w+)[^/]*$', line[1])
if match:
handle = match.group(1)
if handle not in handles:
handles.append(handle)
except Exception, e:
print "Error(%s): %s" % (ourl, e)
exit()
return
def doQueries():
base = getNextBaseURL()
while True:
names, region, party = q.get()
clean_names = cleanNames(stripAccents(names).split(" "))
handles = []
for comb in allCombinations(clean_names):
query = comb.replace(" ", "+") + "+" + region + "+" + party + "+site:twitter.com"
url = base + "/?format=csv&q=" + query
fetchHandles(url, handles)
with codecs.open(sys.argv[2], "a", "utf-8") as out:
out.write("%s, %s\n" % (names, handles))
print "%s, %s" % (names, handles)
q.task_done()
concurrent = 50
q = Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doQueries)
t.daemon = True
t.start()
try:
with open(sys.argv[1], 'rb') as csvfile:
first = True
for line in UnicodeCsvReader(csvfile):
if first:
first = False
continue
names = line[0]
region = stripAccents(line[3]).replace(" ", "+")
party = stripAccents(line[5]).replace(" ", "+")
if party == "C's" or party == u"C´s":
party = "Ciudadanos"
q.put((names, region, party))
q.join()
except KeyboardInterrupt:
sys.exit(1)
|
Kate-Willett/HadISDH_Build
|
TestLeap.py
|
Python
|
cc0-1.0
| 632
| 0.006329
|
#!/usr/local/sci/bin/python
# PYTHON2.7
# import TestLeap
# TestVal = TestLeap.TestLeap(year)
import numpy as np
def TestLeap(year):
'''function to test if a year is a leap year'''
'''returns 0.0 if it is a leap year'''
'''returns a non-zero number if it is not a leap year'''
'''ONLY WORKS WITH SCALARS!!!'''
# first test - is it divisible by 4?
leapoo = (year/4.) - np.round(year/4.)
# second test - if it is divisible by 100, then is it also divisible by 400?
if (((year/100.) - np.round(year/100.)) == 0.):
leapoo = leapoo + ((year/400.) - np.round(year/400.))
return leapoo
|
colinsheppard/beam
|
aws/src/main/python/updateBeamAMI/lambda_function.py
|
Python
|
gpl-3.0
| 2,524
| 0.009113
|
# coding=utf-8
import time
import json
import boto3
from botocore.errorfactory import ClientError
def lambda_handler(event, context):
instance_id = event.get('instance_id')
region_id = event.get('region_id', 'us-east-2')
image_name = 'beam-automation-'+time.strftime("%Y-%m-%d-%H%M%S", time.gmtime())
image_ids = {}
image_ids['us-east-2'] = create_ami(image_name, instance_id)
image_ids['us-east-1'] = copy_ami(image_name, image_ids['us-east-2'], 'us-east-1')
image_ids['us-west-2'] = copy_ami(image_name, image_ids['us-east-2'], 'us-west-2')
update_lambda(image_ids)
return json.dumps(image_ids)
def create_ami(image_name, instance_id):
ec2 = boto3.client('ec2',region_name='us-east-2')
res = ec2.create_image(InstanceId=instance_id,
Name=image_name)
wait4image(ec2, res['ImageId'])
ec2.terminate_instances(InstanceIds=[instance_id])
return res['ImageId']
def copy_ami(image_name, image_id, region):
ec2 = boto3.client('ec2',region_name=region)
res = ec2.copy_image(Name=image_name,
SourceImageId=image_id,
|
SourceRegion='us-east-2')
# wait4image(ec2, res['ImageId'])
return res['ImageId']
def wait4image(ec2, image_id):
waiter = ec2.get_waiter('image_available')
waiter.wait(Filters=[{'Name': 'state', 'Values': ['available']}],
ImageIds=[image_id])
def update_lambda(image_ids):
lm = boto3.client('lambda')
en_var = lm.get_function_configuration(FunctionName='simulateBeam')['Environment']['Variables']
en_var.update({
'us_east_2_IMAGE_ID': image_ids['us-east-2'],
'us_east_1_IMAGE_ID': image_ids['us-east-1'],
'us_west_2_IMAGE_ID': image_ids['us-west-2'],
})
lm.update_function_configuration(
FunctionName='simulateBeam',
Environment={
'Variables': en_var
}
)
def check_instance_id(instance_ids):
for reservation in ec2.describe_instances()['Reservations']:
for instance in reservation['Instances']:
if instance['InstanceId'] in instance_ids:
instance_ids.remove(instance['InstanceId'])
return instance_ids
def stop_instance(instance_ids):
return ec2.stop_instances(InstanceIds=instance_ids)
def terminate_instance(instance_ids):
return ec2.terminate_instances(InstanceIds=instance_ids)
|
kaji-project/pynag
|
pynag/Parsers/__init__.py
|
Python
|
gpl-2.0
| 129,457
| 0.001808
|
# -*- coding: utf-8 -*-
#
# pynag - Python Nagios plug-in and configuration environment
# Copyright (C) 2010 Drew Stinnet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""This module contains low-level Parsers for nagios configuration and status objects.
Hint: If you are looking to parse some nagios configuration data, you probably
want pynag.Model module instead.
The highlights of this module are:
class Config: For Parsing nagios local nagios configuration files
class Livestatus: To connect to MK-Livestatus
class StatusDat: To read info from status.dat (not used a lot, migrate to mk-livestatus)
class LogFiles: To read nagios log-files
class MultiSite: To talk with multiple Livestatus instances
"""
import os
import re
import time
import sys
import socket # for mk_livestatus
import stat
import pynag.Plugins
import pynag.Utils
import StringIO
import tarfile
_sentinel = object()
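# Illustrative usage sketch (not part of the original module; assumes a
# nagios.cfg exists in one of the standard locations checked by
# Config.guess_cfg_file() below):
#
#     from pynag import Parsers
#     config = Parsers.Config()              # auto-discovers nagios.cfg
#     print(config.cfg_file)                 # discovered path, or None
#     print(config.guess_nagios_binary())    # e.g. /usr/bin/nagios, or None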
class Config(object):
""" Parse and write nagios config files """
# Regex for beginning of object definition
# We want everything that matches:
# define <object_type> {
__beginning_of_object = re.compile("^\s*define\s+(\w+)\s*\{?(.*)$")
def __init__(self, cfg_file=None, strict=False):
""" Constructor for :py:class:`pynag.Parsers.config` class
Args:
cfg_file (str): Full path to nagios.cfg. If None, try to
auto-discover location
strict (bool): if True, use stricter parsing which is more prone to
raising exceptions
"""
self.cfg_file = cfg_file # Main configuration file
self.strict = strict # Use strict parsing or not
# If nagios.cfg is not set, lets do some minor autodiscover.
if self.cfg_file is None:
self.cfg_file = self.guess_cfg_file()
self.data = {}
self.maincfg_values = []
self._is_dirty = False
self.reset() # Initialize misc member variables
def guess_nagios_directory(self):
""" Returns a path to the nagios configuration directory on your system
Use this function for determining the nagios config directory in your
code
Returns:
str. directory containing the nagios.cfg file
Raises:
:py:class:`pynag.Parsers.ConfigFileNotFound` if cannot guess config
file location.
"""
cfg_file = self.guess_cfg_file()
if not cfg_file:
raise ConfigFileNotFound("Could not find nagios.cfg")
return os.path.dirname(cfg_file)
def guess_nagios_binary(self):
""" Returns a path to any nagios binary found on your system
Use this function if you don't want to specify the path to the nagios binary
in your code and you are confident that it is located in a common
location
Checked locations are as follows:
* /usr/bin/nagios
* /usr/sbin/nagios
* /usr/local/nagios/bin/nagios
* /nagios/bin/nagios
* /usr/bin/icinga
* /usr/sbin/icinga
* /usr/bin/naemon
* /usr/sbin/naemon
* /usr/local/naemon/bin/naemon.cfg
* /usr/bin/shinken
* /usr/sbin/shinken
Returns:
str. Path to the nagios binary
None if could not find a binary in any of those locations
"""
possible_files = ('/usr/bin/nagios',
'/usr/sbin/nagios',
'/usr/local/nagios/bin/nagios',
'/nagios/bin/nagios',
'/usr/bin/icinga',
'/usr/sbin/icinga',
'/usr/bin/naemon',
'/usr/sbin/naemon',
'/usr/local/naemon/bin/naemon.cfg',
'/usr/bin/shinken',
'/usr/sbin/shinken')
possible_binaries = ('nagios', 'nagios3', 'naemon', 'icinga', 'shinken')
for i in possible_binaries:
command = ['which', i]
code, stdout, stderr = pynag.Utils.runCommand(command=command, shell=False)
if code == 0:
return stdout.splitlines()[0].strip()
return None
def guess_cfg_file(self):
""" Returns a path to any nagios.cfg found on your system
Use this function if you don't want to specify the path to nagios.cfg in your
code and you are confident that it is located in a common location
Checked locations are as follows:
* /etc/nagios/nagios.cfg
* /etc/nagios3/nagios.cfg
* /usr/local/nagios/etc/nagios.cfg
* /nagios/etc/nagios/nagios.cfg
* ./nagios.cfg
* ./nagios/nagios.cfg
* /etc/icinga/icinga.cfg
* /usr/local/icinga/etc/icinga.cfg
* ./icinga.cfg
* ./icinga/icinga.cfg
* /etc/naemon/naemon.cfg
* /usr/local/naemon/etc/naemon.cfg
* ./naemon.cfg
* ./naemon/naemon.cfg
* /etc/shinken/shinken.cfg
Returns:
str. Path to the nagios.cfg or equivalent file
None if couldn't find a file in any of these locations.
"""
possible_files = ('/etc/nagios/nagios.cfg',
'/etc/nagios3/nagios.cfg',
'/usr/local/nagios/etc/nagios.cfg',
'/nagios/etc/nagios/nagios.cfg',
'./nagios.cfg',
'./nagios/nagios.cfg',
'/etc/icinga/icinga.cfg',
'/usr/local/icinga/etc/icinga.cfg',
'./icinga.cfg',
'./icinga/icinga.cfg',
'/etc/naemon/naemon.cfg',
'/usr/local/naemon/etc/naemon.cfg',
'./naemon.cfg',
'./naemon/naemon.cfg',
'/etc/shinken/shinken.cfg',
)
for file_path in possible_files:
if self.isfile(file_path):
return file_path
return None
def reset(self):
""" Reinitializes the data of a parser instance to its default values.
"""
self.cfg_files = [] # List of other configuration files
self.data = {} # dict of every known object definition
self.errors = [] # List of ParserErrors
self.item_list = None
self.item_cache = None
self.maincfg_values = [] # The contents of main nagios.cfg
self._resource_values = [] # The contents of any resource_files
self.item_apply_cache = {} # This is a performance tweak used by _apply_template
# This is a pure list of all the key/values in the config files. It
# shouldn't be useful until the items in it are parsed through with the proper
# 'use' relationships
self.pre_object_list = []
self.post_object_list = []
self.object_type_keys = {
'hostgroup': 'hostgroup_name',
'hostextinfo': 'host_name',
'host': 'host_name',
'service': 'name',
'servicegroup': 'servicegroup_name',
'contact': 'contact_name',
'contactgroup': 'contactgroup_name',
'timeperiod': 'timeperiod_name',
'command': 'command_name',
#'service':['host_name','description'],
}
def _has_template(self, target):
""" Determine if an item has a template associated with it
|
evan-rusin/fly-project
|
mygoals/views.py
|
Python
|
bsd-2-clause
| 6,398
| 0.00297
|
from datetime import datetime, timedelta, timezone
from django.shortcuts import render
from django.core.management import call_command
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from fly_project import settings, constants
from api.models import SavingsGoal, CreditGoal, FinalGoal
def count_days_between(dt1, dt2):
"""Function will return an integer of day numbers between two dates."""
dt1 = dt1.replace(hour=0, minute=0, second=0, microsecond=0)
dt2 = dt2.replace(hour=0, minute=0, second=0, microsecond=0)
return (dt2 - dt1).days
def count_days_between_today_and(dt2):
# Detect whether the unlocked time has elapsed and load the appropriate
# UI associated with this.
now = datetime.now(timezone.utc) # Standardize date to a specific time-zone
# Count how many days are left from today to the unlocked date.
return count_days_between(now,dt2)
@login_required(login_url='/authentication')
def mygoals_page(request):
return render(request, 'mygoals/type/view.html',{
'settings': settings,
})
@login_required(login_url='/authentication')
def savings_goals_page(request):
# Check to see if we have the latest SavingsGoal set, if not then
# create a new goal here.
savings_goal = SavingsGoal.objects.get_latest(request.user.id)
if not savings_goal:
savings_goal = SavingsGoal.objects.create(user_id=request.user.id,)
# Check to see if the current SavingsGoal has 'is_closed=True' which means
# we need to create a new savings goal.
if savings_goal.is_closed == True:
savings_goal = SavingsGoal.objects.create(user_id=request.user.id,)
# Check how many days are remaining from today to the unlock date.
days_remaining = 99999
if savings_goal.unlocks:
days_remaining = count_days_between_today_and(savings_goal.unlocks)
# CASE 1 OF 2:
# Load the main goal settings UI.
url = ''
if days_remaining > 0:
url = 'mygoals/savings/view.html'
# CASE 2 OF 2:
# Load the UI to handle whether the goal was set or not.
else:
url = 'mygoals/savings/complete.html'
return render(request, url,{
'settings': settings,
'constants': constants,
'savings_goal': savings_goal,
'days_remaining': days_remaining,
})
@login_required(login_url='/authentication')
def credit_goals_page(request):
# Check to see if we have the latest CreditGoal set, if not then
# create a new goal here.
credit_goal = CreditGoal.objects.get_latest(request.user.id)
if not credit_goal:
credit_goal = CreditGoal.objects.create(user_id=request.user.id,)
# Check to see if the current SavingsGoal has 'is_closed=True' which means
# we need to create a new savings goal.
if credit_goal.is_closed == True:
credit_goal = CreditGoal.objects.create(user_id=request.user.id,)
# Check how many days are remaining from today to the unlock date.
days_remaining = 99999
if credit_goal.unlocks:
days_remaining = count_days_between_today_and(credit_goal.unlocks)
# CASE 1 OF 2:
# Load the main goal settings UI.
url = ''
if days_remaining > 0:
url = 'mygoals/credit/view.html'
# CASE 2 OF 2:
# Load the UI to handle whether the goal was set or not.
else:
url = 'mygoals/credit/complete.html'
return render(request, url,{
'settings': settings,
'constants': constants,
'credit_goal': credit_goal,
'days_remaining': days_remaining,
})
@login_required(login_url='/authentication')
def final_goal_page(request):
# Check to see if we have the latest FinalGoal set, if not then
# create a new goal here.
final_goal = FinalGoal.objects.get_latest(request.user.id)
if not final_goal:
final_goal = FinalGoal.objects.create(user_id=request.user.id,)
# Check to see if the current FinalGoal has 'is_closed=True' which means
# we need to create a new final goal.
if final_goal.is_closed == True:
final_goal = FinalGoal.objects.create(user_id=request.user.id,)
# Check how many days are remaining from today to the unlock date.
days_remaining = 99999
if final_goal.unlocks:
days_remaining = count_days_between_today_and(final_goal.unlocks)
# CASE 1 OF 2:
# Load the main goal settings UI.
url = ''
if days_remaining > 0:
url = 'mygoals/final/view.html'
# CASE 2 OF 2:
# Load the UI to handle whether the goal was set or not.
else:
url = 'mygoals/final/complete.html'
return render(request, url,{
'settings': settings,
'constants': constants,
'final_goal': final_goal,
'days_remaining': days_remaining,
})
@login_required(login_url='/authentication')
def goal_complete_page(request, goal_type, goal_id):
goal = None
try:
if goal_type == constants.SAVINGS_MYGOAL_TYPE:
goal = SavingsGoal.objects.get(id=goal_id)
elif goal_type == constants.CREDIT_MYGOAL_TYPE:
goal = CreditGoal.objects.get(id=goal_id)
elif goal_type == constants.GOAL_MYGOAL_TYPE:
goal = FinalGoal.objects.get(id=goal_id)
except Exception as e:
pass
return render(request, 'mygoals/complete/view.html',{
'settings': settings,
'constants': constants,
'goal_id': int(goal_id),
'goal_type': int(goal_type),
'goal': goal,
})
@login_required(login_url='/authentication')
def goal_failed_page(request, goal_type, goal_id):
goal = None
try:
if goal_type == constants.SAVINGS_MYGOAL_TYPE:
goal = SavingsGoal.objects.get(id=goal_id)
elif goal_type == constants.CREDIT_MYGOAL_TYPE:
goal = CreditGoal.objects.get(id=goal_id)
elif goal_type == constants.GOAL_MYGOAL_TYPE:
goal = FinalGoal.objects.get(id=goal_id)
except Exception as e:
pass
# Evaluate the User's profile
call_command('evaluate_me', str(request.me.id))
return render(request, 'mygoals/failed/view.html',{
'settings': settings,
'constants': constants,
'goal_id': int(goal_id),
'goal_type': int(goal_type),
'goal': goal,
})
|
quantumlib/Cirq
|
cirq-google/cirq_google/line/placement/optimization.py
|
Python
|
apache-2.0
| 4,663
| 0.001501
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Callable, Tuple, TypeVar
T = TypeVar('T')
def _accept(random_sample: float, cost_diff: float, temp: float) -> Tuple[bool, float]:
"""Calculates probability and draws if solution should be accepted.
Based on exp(-Delta*E/T) formula.
Args:
random_sample: Uniformly distributed random number in the range [0, 1).
cost_diff: Cost difference between new and previous solutions.
temp: Current temperature.
Returns:
Tuple of boolean and float, with boolean equal to True if solution is
accepted, and False otherwise. The float value is acceptance
probability.
"""
exponent = -cost_diff / temp
if exponent >= 0.0:
return True, 1.0
probability = math.exp(exponent)
return probability > random_sample, probability
def anneal_minimize(
initial: T,
cost_func: Callable[[T], float],
move_func: Callable[[T], T],
random_sample: Callable[[], float],
temp_initial: float = 1.0e-2,
temp_final: float = 1e-6,
cooling_factor: float = 0.99,
repeat: int = 100,
trace_func: Callable[[T, float, float, float, bool], None] = None,
) -> T:
"""Minimize solution using Simulated Annealing meta-heuristic.
Args:
initial: Initial solution of type T to the problem.
cost_func: Callable which takes current solution of type T, evaluates it
and returns float with the cost estimate. The better solution is,
the lower resulting value should be; negative values are allowed.
move_func: Callable which takes current solution of type T and returns a
new solution candidate of type T which is random iteration over
input solution. The input solution, which is argument to this
callback should not be mutated.
random_sample: Callable which gives uniformly sampled random value from
the [0, 1) interval on each call.
temp_initial: Optional initial temperature for simulated annealing
optimization. Scale of this value is cost_func-dependent.
temp_final: Optional final temperature for simulated annealing
optimization, where search should be stopped. Scale of this value is
cost_func-dependent.
cooling_factor: Optional factor to be applied to the current temperature
and give the new temperature, this must be strictly greater than 0
and strictly lower than 1.
repeat: Optional number of iterations to perform at each given
temperature.
trace_func: Optional callback for tracing simulated annealing progress.
This is going to be called at each algorithm step for the arguments:
solution candidate (T), current temperature (float), candidate cost
(float), probability of accepting candidate (float), and acceptance
decision (boolean).
Returns:
The best solution found.
Raises:
ValueError: When supplied arguments are invalid.
"""
if not 0.0 < cooling_factor < 1.0:
raise ValueError("Cooling factor must be within (0, 1) range")
temp = temp_initial
sol = initial
sol_cost = cost_func(initial)
best = sol
best_cost = sol_cost
if trace_func:
trace_func(sol, temp, sol_cost, 1.0, True)
while temp > temp_final:
for _ in range(0, repeat):
# Find a new solution candidate and evaluate its cost.
cand = move_func(sol)
cand_cost = cost_func(cand)
# Store the best solution, regardless if it is accepted or not.
if best_cost > cand_cost:
best = cand
best_cost = cand_cost
accepted, probability = _accept(random_sample(), cand_cost - sol_cost, temp)
if accepted:
sol = cand
sol_cost = cand_cost
if trace_func:
trace_func(cand, temp, cand_cost, probability, accepted)
temp *= cooling_factor
return best
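# Illustrative usage sketch (not part of the original module): minimizing a
# one-dimensional quadratic with anneal_minimize. The quadratic cost, the
# +/-0.5 move size and the use of the standard-library random module are
# assumptions made only for this example.
if __name__ == '__main__':
    import random

    _best = anneal_minimize(
        initial=5.0,
        cost_func=lambda x: (x - 2.0) ** 2,
        move_func=lambda x: x + random.uniform(-0.5, 0.5),
        random_sample=random.random,
    )
    print(_best)  # expected to land near the minimum at x = 2.0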
|
buchwj/xvector
|
client/xVClient/ErrorReporting.py
|
Python
|
gpl-3.0
| 4,145
| 0.002413
|
# xVector Engine Client
# Copyright (c) 2011 James Buchwald
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Contains code for nicely reporting errors to the user.
"""
import logging
import traceback
from PyQt4 import QtGui
from xVClient import ClientGlobals
mainlog = logging.getLogger("")
# Severity constants
FatalError = 1
"""Fatal error, forces termination of application."""
NormalError = 2
"""Normal error, this has impact but does not crash the program."""
WarningError = 3
"""Warning, this does not affect function but should cause concern."""
NoticeError = 4
"""General information."""
def ShowError(message, severity=NormalError, parent=None):
"""
Displays an error message to the user and waits for a response.
"""
dlg = QtGui.QMessageBox(parent)
dlg.setText(message)
if severity == FatalError:
dlg.setIcon(QtGui.QMessageBox.Critical)
dlg.setWindowTitle("Fatal Error")
elif severity == NormalError:
dlg.setIcon(QtGui.QMessageBox.Critical)
dlg.setWindowTitle("Error")
elif severity == WarningError:
dlg.setIcon(QtGui.QMessageBox.Warning)
dlg.setWindowTitle("Warning")
elif severity == NoticeError:
dlg.setIcon(QtGui.QMessageBox.Information)
dlg.setWindowTitle("Notice")
else:
dlg.setIcon(QtGui.QMessageBox.NoIcon)
dlg.setWindowTitle("Message")
dlg.exec_()
def ShowException(severity=NormalError, start_msg='An error has occurred!', parent=None):
'''
Displays the currently-handled exception in an error box.
'''
msg = start_msg + "\n\n" + traceback.format_exc()
ShowError(msg, severity, parent)
class ErrorMessageHandler(logging.Handler):
'''
Logging handler that displays messages
|
in Qt message boxes.
'''
def __init__(self, parent=None):
'''
Creates a new handler.
@type parent: QtGui.QWi
|
dget
@param parent: Parent widget for errors to be displayed under.
'''
super(ErrorMessageHandler,self).__init__()
self.Parent = parent
'''Parent widget for errors to be displayed under.'''
def _ShowError(self, message):
'''
Shows an error message and returns immediately.
@type message: string
@param message: Message to display.
'''
app = ClientGlobals.Application
wnd = QtGui.QMessageBox(parent=self.Parent)
wnd.setIcon(QtGui.QMessageBox.Critical)
wnd.setWindowTitle("Error")
wnd.setStandardButtons(QtGui.QMessageBox.Ok)
wnd.setText(message)
wnd.exec_()
def emit(self, record):
self._ShowError(record.getMessage())
def ConfigureLogging(parent=None):
'''
Configures the logging mechanism to report errors as dialog boxes.
@type parent: QtGui.QWidget
@param parent: Parent widget for errors to be displayed under.
'''
# Set up the error handler (output to a message box).
handler = ErrorMessageHandler(parent)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
handler.setLevel(logging.ERROR)
mainlog.addHandler(handler)
# Send lower-level messages to stderr.
lowhandler = logging.StreamHandler()
lowhandler.setFormatter(formatter)
lowhandler.setLevel(logging.DEBUG)
mainlog.addHandler(lowhandler)
# Make sure that the logger catches all levels of messages.
mainlog.setLevel(logging.DEBUG)
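# A minimal usage sketch of this module, assuming a running QApplication and a
# top-level window (both created below only for illustration; the file name is
# made up): once ConfigureLogging() is wired to a parent widget, any
# logging.error() call pops up a Qt message box while lower-level messages
# still go to stderr.
def _example_error_reporting():
    app = QtGui.QApplication([])
    window = QtGui.QMainWindow()
    ConfigureLogging(parent=window)
    try:
        open("missing_settings.cfg")
    except IOError:
        ShowException(NormalError, "Could not load settings!", window)
    mainlog.error("This message appears in a Qt message box.")
    return app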
|
melvin0008/pythoncodestrial
|
trybs4.py
|
Python
|
apache-2.0
| 697
| 0.030129
|
from bs4 import BeautifulSoup
import xlsxwriter
workbook= xlsxwriter.Workbook("data.xlsx")
worksheet = workbook.add_worksheet()
f = open('rough.html',"r")
data=f.read()
soup=BeautifulSoup(data)
div = soup.find('div', {"class":'dataTables_scroll'})
table=div.find('table')
tbody=div.find('tbody')
rows=tbody.find_all('tr')
rowno = 0
for row in rows:
a=row.find_all('a')
td=row.find_all('td')
worksheet.write(rowno, 1, a[2].text)
worksheet.write(rowno, 2, td[3].text[td[3].text.find('P:'):])
workshe
|
et.write(rowno, 3, a[3].text)
worksheet.write(rowno, 4, a[4].text)
worksheet.write(rowno, 5, a[3].text)
worksheet.write(rowno, 6, td
|
[6].text)
rowno=rowno+1
workbook.close()
print "Done"
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/datasets/setup.py
|
Python
|
mit
| 658
| 0
|
import os
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('datasets', parent_package, top
|
_path)
config.add_data_dir('data')
config.add_data_dir('descr')
config.add_data_dir('images')
config.add_data_dir(os.path.join('tests', 'data'))
config.add_extension('_svmlight_format',
sources=['_svmlight_format.c'],
include_dirs=[numpy.get_include()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
|
setup(**configuration(top_path='').todict())
|
jwodder/qypi
|
test/test_main.py
|
Python
|
mit
| 30,735
| 0.000488
|
import json
from traceback import format_exception
from click.testing import CliRunner
import pytest
from qypi.__main__ import qypi
def show_result(r):
if r.exception is not None:
return "".join(format_exception(*r.exc_info))
else:
return r.output
def test_list(mocker):
spinstance = mocker.Mock(
**{
"list_packages.return_value": [
"foobar",
"BarFoo",
"quux",
"Gnusto-Cleesh",
"XYZZY_PLUGH",
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["list"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"foobar\n" "BarFoo\n" "quux\n" "Gnusto-Cleesh\n" "XYZZY_PLUGH\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [mocker.call.list_packages()]
def test_owner(mocker):
spinstance = mocker.Mock(
**{
"package_roles.return_value": [
["Owner", "luser"],
["Maintainer", "jsmith"],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owner", "foobar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "foobar": [\n'
" {\n"
' "role": "Owner",\n'
' "user": "luser"\n'
" },\n"
" {\n"
' "role": "Maintainer",\n'
' "user": "jsmith"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [mocker.call.package_roles("foobar")]
def test_multiple_owner(mocker):
spinstance = mocker.Mock(
**{
"package_roles.side_effect": [
[
["Owner", "luser"],
["Maintainer", "jsmith"],
],
[
["Owner", "jsmith"],
["Owner", "froody"],
],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owner", "foobar", "Glarch"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "foobar": [\n'
" {\n"
' "role": "Owner",\n'
' "user": "luser"\n'
" },\n"
" {\n"
' "role": "Maintainer",\n'
' "user": "jsmith"\n'
" }\n"
" ],\n"
' "Glarch": [\n'
" {\n"
' "role": "Owner",\n'
' "user": "jsmith"\n'
" },\n"
" {\n"
' "role": "Owner",\n'
' "user": "froody"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.package_roles("foobar"),
mocker.call.package_roles("Glarch"),
]
def test_owned(mocker):
spinstance = mocker.Mock(
**{
"user_packages.return_value": [
["Owner", "foobar"],
|
["Maintainer", "quux"],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owned", "luser"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "luser": [\n'
" {\n"
' "package": "foobar",\n'
'
|
"role": "Owner"\n'
" },\n"
" {\n"
' "package": "quux",\n'
' "role": "Maintainer"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [mocker.call.user_packages("luser")]
def test_multiple_owned(mocker):
spinstance = mocker.Mock(
**{
"user_packages.side_effect": [
[
["Owner", "foobar"],
["Maintainer", "quux"],
],
[
["Maintainer", "foobar"],
["Owner", "Glarch"],
],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["owned", "luser", "jsmith"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"{\n"
' "luser": [\n'
" {\n"
' "package": "foobar",\n'
' "role": "Owner"\n'
" },\n"
" {\n"
' "package": "quux",\n'
' "role": "Maintainer"\n'
" }\n"
" ],\n"
' "jsmith": [\n'
" {\n"
' "package": "foobar",\n'
' "role": "Maintainer"\n'
" },\n"
" {\n"
' "package": "Glarch",\n'
' "role": "Owner"\n'
" }\n"
" ]\n"
"}\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.user_packages("luser"),
mocker.call.user_packages("jsmith"),
]
def test_search(mocker):
spinstance = mocker.Mock(
**{
"search.return_value": [
{
"name": "foobar",
"version": "1.2.3",
"summary": "Foo all your bars",
"_pypi_ordering": False,
},
{
"name": "quux",
"version": "0.1.0",
"summary": "Do that thing this does",
"_pypi_ordering": True,
},
{
"name": "gnusto",
"version": "0.0.0",
"summary": "",
"_pypi_ordering": False,
},
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(qypi, ["search", "term", "keyword:foo", "readme:bar"])
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "name": "foobar",\n'
' "summary": "Foo all your bars",\n'
' "version": "1.2.3"\n'
" },\n"
" {\n"
' "name": "quux",\n'
' "summary": "Do that thing this does",\n'
' "version": "0.1.0"\n'
" },\n"
" {\n"
' "name": "gnusto",\n'
' "summary": null,\n'
' "version": "0.0.0"\n'
" }\n"
"]\n"
)
spclass.assert_called_once_with("https://pypi.org/pypi")
assert spinstance.method_calls == [
mocker.call.search(
{"description": ["term", "bar"], "keywords": ["foo"]},
"and",
)
]
def test_browse(mocker):
spinstance = mocker.Mock(
**{
"browse.return_value": [
["foobar", "1.2.3"],
["foobar", "1.2.2"],
["foobar", "1.2.1"],
["foobar", "1.2.0"],
["quux", "0.1.0"],
["gnusto", "0.0.0"],
],
}
)
spclass = mocker.patch("qypi.api.ServerProxy", return_value=spinstance)
r = CliRunner().invoke(
qypi,
["browse", "Typing :: Typed", "Topic :: Utilities"],
)
assert r.exit_code == 0, show_result(r)
assert r.output == (
"[\n"
" {\n"
' "name": "foobar",\n'
' "version": "1.2.3"\n'
" },\n"
" {\n"
' "name": "foobar",\n'
' "
|
wang1352083/pythontool
|
python-2.7.12-lib/test/test_extcall.py
|
Python
|
mit
| 7,975
| 0.000251
|
# -*- coding: utf-8 -*-
"""Doctest for method/function calls.
We're going the use these types for extra testing
>>> from UserList import UserList
>>> from UserDict import UserDict
We're defining four helper functions
>>> def e(a,b):
... print a, b
>>> def f(*a, **k):
... print a, test_support.sortdict(k)
>>> def g(x, *y, **z):
... print x, y, test_support.sortdict(z)
>>> def h(j=1, a=2, h=3):
... print j, a, h
Argument list examples
>>> f()
() {}
>>> f(1)
(1,) {}
>>> f(1, 2)
(1, 2) {}
>>> f(1, 2, 3)
(1, 2, 3) {}
>>> f(1, 2, 3, *(4, 5))
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *[4, 5])
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *UserList([4, 5]))
(1, 2, 3, 4, 5) {}
Here we add keyword arguments
>>> f(1, 2, 3, **{'a':4, 'b':5})
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7})
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9})
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
>>> f(1, 2, 3, **UserDict(a=4, b=5))
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *(4, 5), **UserDict(a=6, b=7))
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **UserDict(a=8, b=9))
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
Examples with invalid arguments (TypeErrors). We're also testing the function
names in the exception messages.
Verify clearing of SF bug #733667
>>> e(c=4)
Traceback (most recent call last):
...
TypeError: e() got an unexpected keyword argument 'c'
>>> g()
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*())
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*(), **{})
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(1)
1 () {}
>>> g(1, 2)
1 (2,) {}
>>> g(1, 2, 3)
1 (2, 3) {}
>>> g(1, 2, 3, *(4, 5))
1 (2, 3, 4, 5) {}
>>> class Nothing: pass
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing:
... def __len__(self): return 5
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be an iterable, not instance
>>> class Nothing():
... def __len__(self): return 5
... def __getitem__(self, i):
... if i<3: return i
... else: raise IndexError(i)
...
>>> g(*Nothing())
0 (1, 2) {}
>>> class Nothing:
... def __init__(self): self.c = 0
... def __iter__(self): return self
... def next(self):
... if self.c == 4:
... raise StopIteration
... c = self.c
... self.c += 1
... return c
...
>>> g(*Nothing())
0 (1, 2, 3) {}
Check for issue #4806: Does a TypeError in a generator get propagated with the
right error message?
>>> def broken(): raise TypeError("myerror")
...
>>> g(*(broken() for i in range(1)))
Traceback (most recent call last):
...
TypeError: myerror
Make sure that the function doesn't stomp the dictionary
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = d.copy()
>>> g(1, d=4, **d)
1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> d == d2
True
What about willful misconduct?
>>> def saboteur(**kw):
... kw['x'] = 'm'
... return kw
>>> d = {}
>>> kw = saboteur(a=1, **d)
>>> d
{}
>>> g(1, 2, 3, **{'x': 4, 'y': 5})
Traceback (most recent call last):
...
TypeError: g() got multiple values for keyword argument 'x'
>>> f(**{1:2})
Traceback (most recent call last):
...
TypeError: f() keywords must be strings
>>> h(**{'e': 2})
Traceback (most recent call last):
...
TypeError: h() got an unexpected keyword argument 'e'
>>> h(*h)
Traceback (most recent call last):
...
TypeError: h() argument after * must be an iterable, not function
>>> dir(*h)
Traceback (most recent call last):
...
TypeError: dir() argument after * must be an iterable, not function
>>> None(*h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after * must be an iterable, \
not function
>>> h(**h)
Traceback (most recent call last):
...
TypeError: h() argument after ** must be a mapping, not function
>>> dir(**h)
Traceback (most recent call last):
...
TypeError: dir() argument af
|
ter ** must be a mapping, not function
>>> None(**h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after ** must be a mapping, \
not function
>>> dir(b=1, **{'b': 1})
Traceback (most recent call last):
...
|
TypeError: dir() got multiple values for keyword argument 'b'
Another helper function
>>> def f2(*a, **b):
... return a, b
>>> d = {}
>>> for i in xrange(512):
... key = 'k%d' % i
... d[key] = i
>>> a, b = f2(1, *(2,3), **d)
>>> len(a), len(b), b == d
(3, 512, True)
>>> class Foo:
... def method(self, arg1, arg2):
... return arg1+arg2
>>> x = Foo()
>>> Foo.method(*(x, 1, 2))
3
>>> Foo.method(x, *(1, 2))
3
>>> Foo.method(*(1, 2, 3))
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
>>> Foo.method(1, *[2, 3])
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
A PyCFunction that takes only positional parameters should allow an
empty keyword dictionary to pass without a complaint, but raise a
TypeError if the dictionary is not empty
>>> try:
... silence = id(1, *{})
... True
... except:
... False
True
>>> id(1, **{'foo': 1})
Traceback (most recent call last):
...
TypeError: id() takes no keyword arguments
A corner case of keyword dictionary items being deleted during
the function call setup. See <http://bugs.python.org/issue2016>.
>>> class Name(str):
... def __eq__(self, other):
... try:
... del x[self]
... except KeyError:
... pass
... return str.__eq__(self, other)
... def __hash__(self):
... return str.__hash__(self)
>>> x = {Name("a"):1, Name("b"):2}
>>> def f(a, b):
... print a,b
>>> f(**x)
1 2
An obscure message:
>>> def f(a, b):
... pass
>>> f(b=1)
Traceback (most recent call last):
...
TypeError: f() takes exactly 2 arguments (1 given)
The number of arguments passed in includes keywords:
>>> def f(a):
... pass
>>> f(6, a=4, *(1, 2, 3))
Traceback (most recent call last):
...
TypeError: f() takes exactly 1 argument (5 given)
"""
import unittest
import sys
from test import test_support
class ExtCallTest(unittest.TestCase):
def test_unicode_keywords(self):
def f(a):
return a
self.assertEqual(f(**{u'a': 4}), 4)
self.assertRaises(TypeError, f, **{u'stören': 4})
self.assertRaises(TypeError, f, **{u'someLongString':2})
try:
f(a=4, **{u'a': 4})
except TypeError:
pass
else:
self.fail("duplicate arguments didn't raise")
def test_main():
test_support.run_doctest(sys.modules[__name__], True)
test_support.run_unittest(ExtCallTest)
if __name__ == '__main__':
test_main()
|
sebnorth/extended_user
|
mysite/settings.py
|
Python
|
bsd-3-clause
| 5,917
| 0.001183
|
import os
# Django settings for mysite project.
DEBUG = True
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
SITE_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
DATE_INPUT_FORMATS = ('%d/%m/%Y',)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '05=^qgbhg3!6-dzb6#&2j^jmh-2fgc%22!z_!w*&8iy_m$2*$*'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATES = [
{
'BA
|
CKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(SITE_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processor
|
s.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages'
],
'debug': DEBUG,
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'polls'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
wangxiangyu/horizon
|
openstack_dashboard/test/api_tests/nova_rest_tests.py
|
Python
|
apache-2.0
| 11,121
| 0
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from django.conf import settings
from openstack_dashboard.api.rest import nova
from openstack_dashboard.test import helpers as test
class NovaRestTestCase(test.TestCase):
#
# Keypairs
#
@mock.patch.object(nova.api, 'nova')
def test_keypair_get(self, nc):
request = self.mock_rest_request()
nc.keypair_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.Keypairs().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
|
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.keypair_list.assert_called_once_with(request)
@mock.patch.object(nova.api, 'nova')
def test_keypair_create(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!"}''')
new = nc.keypair_create.return_value
new.to_dict.retur
|
n_value = {'name': 'Ni!', 'public_key': 'sekrit'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "sekrit"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_create.assert_called_once_with(request, 'Ni!')
@mock.patch.object(nova.api, 'nova')
def test_keypair_import(self, nc):
request = self.mock_rest_request(body='''
{"name": "Ni!", "public_key": "hi"}
''')
new = nc.keypair_import.return_value
new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'}
new.name = 'Ni!'
with mock.patch.object(settings, 'DEBUG', True):
response = nova.Keypairs().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content,
'{"name": "Ni!", "public_key": "hi"}')
self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21')
nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi')
#
# Availability Zones
#
def test_availzone_get_brief(self):
self._test_availzone_get(False)
def test_availzone_get_detailed(self):
self._test_availzone_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_availzone_get(self, detail, nc):
if detail:
request = self.mock_rest_request(GET={'detailed': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.availability_zone_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'id': 'one'}}),
mock.Mock(**{'to_dict.return_value': {'id': 'two'}}),
]
response = nova.AvailabilityZones().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"id": "one"}, {"id": "two"}]}')
nc.availability_zone_list.assert_called_once_with(request, detail)
#
# Limits
#
def test_limits_get_not_reserved(self):
self._test_limits_get(False)
def test_limits_get_reserved(self):
self._test_limits_get(True)
@mock.patch.object(nova.api, 'nova')
def _test_limits_get(self, reserved, nc):
if reserved:
request = self.mock_rest_request(GET={'reserved': 'true'})
else:
request = self.mock_rest_request(GET={})
nc.tenant_absolute_limits.return_value = {'id': 'one'}
response = nova.Limits().get(request)
self.assertStatusCode(response, 200)
nc.tenant_absolute_limits.assert_called_once_with(request, reserved)
self.assertEqual(response.content, '{"id": "one"}')
#
# Servers
#
@mock.patch.object(nova.api, 'nova')
def test_server_create_missing(self, nc):
request = self.mock_rest_request(body='''{"name": "hi"}''')
response = nova.Servers().post(request)
self.assertStatusCode(response, 400)
self.assertEqual(response.content,
'"missing required parameter \'source_id\'"')
nc.server_create.assert_not_called()
@mock.patch.object(nova.api, 'nova')
def test_server_create_basic(self, nc):
request = self.mock_rest_request(body='''{"name": "Ni!",
"source_id": "image123", "flavor_id": "flavor123",
"key_name": "sekrit", "user_data": "base64 yes",
"security_groups": [{"name": "root"}]}
''')
new = nc.server_create.return_value
new.to_dict.return_value = {'id': 'server123'}
new.id = 'server123'
response = nova.Servers().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response.content, '{"id": "server123"}')
self.assertEqual(response['location'], '/api/nova/servers/server123')
nc.server_create.assert_called_once_with(
request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes',
[{'name': 'root'}]
)
@mock.patch.object(nova.api, 'nova')
def test_server_get_single(self, nc):
request = self.mock_rest_request()
nc.server_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Server().get(request, "1")
self.assertStatusCode(response, 200)
nc.server_get.assert_called_once_with(request, "1")
#
# Extensions
#
@mock.patch.object(nova.api, 'nova')
@mock.patch.object(settings,
'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', ['baz'])
def _test_extension_list(self, nc):
request = self.mock_rest_request()
nc.list_extensions.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'baz'}}),
]
response = nova.Extensions().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.content,
'{"items": [{"name": "foo"}, {"name": "bar"}]}')
nc.list_extensions.assert_called_once_with(request)
#
# Flavors
#
def test_get_extras_no(self):
self._test_flavor_get_single(get_extras=False)
def test_get_extras_yes(self):
self._test_flavor_get_single(get_extras=True)
def test_get_extras_default(self):
self._test_flavor_get_single(get_extras=None)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_get_single(self, nc, get_extras):
if get_extras:
request = self.mock_rest_request(GET={'get_extras': 'tRuE'})
elif get_extras is None:
request = self.mock_rest_request()
get_extras = False
else:
request = self.mock_rest_request(GET={'get_extras': 'fAlsE'})
nc.flavor_get.return_value.to_dict.return_value = {'name': '1'}
response = nova.Flavor().get(request, "1")
self.assertStatusCode(response, 200)
if get_extras:
self.assertEqual(response.content, '{"extras": {}, "name": "1"}')
else:
self.assertEqual(response.content, '{"name": "1"}')
nc.flavor_get.assert_called_once_with(request, "1",
get_extras=get_extras)
@mock.patch.object(nova.api, 'nova')
def _test_flavor_list_public(self, nc, is_public=None):
|
clovemfeng/studydemo
|
20140617/userlist_data.py
|
Python
|
gpl-2.0
| 657
| 0.024353
|
#!/usr/bin/python3
def sanitize(time_string):
if '-' in time_string:
splitter = '-'
elif ':' in time_string:
splitter = ':'
else:
retu
|
rn(time_string)
(mins, secs) = time_string.strip().split(splitter)
return(mins + '.' + secs)
def get_coach_data(filename):
try:
with open(filename) as fn:
data = fn.readline()
return(data.strip().split(','))
except IOError as ioerr:
print('File Error:' + str(ioerr))
return(None)
sarah = get_coach_data('sarah2.txt')
(sarah_name, sarach_dob) = sarah.pop(0), sarah.pop(0)
print(sarah_name + "'s fastest time are:"+
str(sorted(set([sanitize(t) for t in sar
|
ah]))[0:3]))
|
donkeysharp/elvispy
|
elvis/climanager.py
|
Python
|
mit
| 889
| 0.00225
|
import os
def create_peanut(peanut_name):
peanut_dir = './peanuts/%s' % peanut_name
if os.path.exists(peanut_dir):
print('Peanut already exists')
return
os.mkdir(peanut_dir)
os.mkdir(peanut_dir
|
+ '/templates')
f = open(peanut_dir + '/__init__.py', 'w')
f.write('')
f.flush()
f.close()
f = open(peanut_dir + '/main.py', 'w')
f.write('\n__META__ = {\n')
f.write(" 'displayName': '%s',\n" % peanut_name)
f.write(" 'description': 'Peanut description',\n")
f.
|
write(" 'version': '0.1',\n")
f.write(" 'enabled': True,\n")
f.write("}\n\n")
f.write('def load(peanut):\n')
f.write(" print('Loading peanut %s')\n" % peanut_name)
f.flush()
f.close()
def clean():
if os.path.exists('./.__tmp__') and os.path.isdir('./.__tmp__'):
import shutil
shutil.rmtree('./.__tmp__')
|
RESTfactory/presence
|
presence/migrations/0002_session.py
|
Python
|
gpl-3.0
| 830
| 0.00241
|
# -*- c
|
oding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-03 14:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
|
dependencies = [
('presence', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Session',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.DateTimeField()),
('end', models.DateTimeField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='presence.Entity')),
],
),
]
|
tortugueta/multilayers
|
multilayers.py
|
Python
|
gpl-3.0
| 70,824
| 0.000706
|
# -*- coding: utf-8 -*-
"""
Name : multilayers
Author : Joan Juvert <trust.no.one.51@gmail.com>
Version : 1.0
Description : A class library to simulate light propagation in
: multilayer systems.
Copyright 2012 Joan Juvert
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
################################# ToDo ################################
#
# Some attributes that have a 'get' method could be decorated as
# properties in order to supress the parantheses in the method call.
#
# The reflection coefficients for TE and TM waves, rte and rtm, as well
# as their ratio -rtm/rte, could be expressed in terms of ellipsometric
# angles Psi and Delta (see Handbook of ellipsometry, Tompkins)
#
#######################################################################
import bphysics as bp
import numpy as np
import scipy.interpolate as interpolation
############################ Class definitions ########################
class Medium(object):
"""
The Medium class implements an object representing an optical
medium (basically its refractive index).
It contains the minimum and maximum wavelengths for which the
refractive index is known and a couple of interpolators to calculate
the refractive index and extintion coefficient at any wavelength in
the available range.
All the attributes are private and accessed through the provided
methods.
"""
def __init__(self, filename, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None):
"""
Initialize a Medium instance.
The refractive indices that characterize the medium are read
from a text file. After loading the table of refractive indices
an interpolator is built that allows to calculate the refractive
index at any wavelength within the available range.
Note that the table is actually read through the numpy.loadtxt
function. The loaded text file must have a column with the
wavelength values, another with the real part of the refractive
index, and another with its imaginary part. If there are other
columns in your file, or there are not in that order, the
'usecols' optional argument can be used to select which columns
to read.
Parameters
----------
filename : str
Path to the file containing the table of triplets
(wavelenght, n, k) that chara
|
cterize the index of refraction
of the medium.
comments : str, optional
The character used t
|
o indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will
convert that column to a float. E.g., if column 0 is a date
string:``converters = {0: datestr2num}``. Converters can
also be used to provide a default value for missing data
(but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th
columns. The default, None, results in all columns being
read.
Returns
-------
out : Medium
A Medium instance.
See also
--------
numpy.loadtxt
"""
# Initialize variables
self.__maxWlength = None
self.__minWlength = None
self.__nInterpolator = None
self.__kInterpolator = None
# Load the table of refractive indices and generate the
# interpolators.
table = np.loadtxt(filename, 'float', comments, delimiter,
converters, skiprows, usecols)
wavelengths = table[:, 0]
refrIndex = table[:, 1]
extCoef = table[:, 2]
self.__maxWlength = wavelengths.max()
self.__minWlength = wavelengths.min()
self.__nInterpolator = interpolation.interp1d(
wavelengths, refrIndex, kind='cubic')
self.__kInterpolator = interpolation.interp1d(
wavelengths, extCoef, kind='cubic')
def getRefrIndex(self, wavelength):
"""
Returns the complex refractive index at the given wavelength.
Parameters
----------
wavelength : float
The wavelength at which we want to calculate the complex
refractive index. In the same units as in the file from
which the refractive indices were loaded.
Returns
-------
out : numpy.complex128
The complex refractive index.
"""
try:
return self.__nInterpolator(wavelength) + \
self.__kInterpolator(wavelength) * 1j
except ValueError:
print("Error: you are trying to work at a wavelength outside " + \
"the range where the refractive indices are known")
raise
def getMinMaxWlength(self):
"""
Returns a tuple (min, max) with the shortest and longest
wavelengths for which the refractive index is known.
Returns
-------
out : tuple
A tuple with the minimum and maximum wavelengths for which
the refractive index can be calculated. In the same units as
in the file from which the refractive indices were loaded.
"""
return (self.__minWlength, self.__maxWlength)
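# A minimal usage sketch of the Medium class, assuming a hypothetical
# three-column data file "silicon.nk" with wavelength, n and k; the file name
# and its units are illustrative only.
def _example_medium_usage():
    silicon = Medium("silicon.nk")
    wl_min, wl_max = silicon.getMinMaxWlength()
    # Complex refractive index at the middle of the known wavelength range.
    return silicon.getRefrIndex((wl_min + wl_max) / 2.0)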
class Multilayer(object):
"""
The Multilayer class implements a layered optical medium in a
logical way. That allows to perform some complex calculations in an
understandable and flexible way.
All the attributes are private and accessed through the provided
methods. The structure is the following:
workingWavelength
minMaxWlength
polarization
charMatrixUpDown
charMatrixDownUp
coefficientsUpDown --> {'r', 't', 'R', 'T'}
coefficientsDownUp --> {'r', 't', 'R', 'T'}
stack --> [
top medium,
layer 1
.
.
.
layer N,
bottom medium ----> {
] 'medium', ------> Medium instance
'position',
'thickness',
'angle',
'matrix'
'refindex'
}
#There are properties that are common to the whole system:
- Wavelength of the light.
- Minimum and maximum wavelengths at which the refractive
indices can be calculated in all the layers.
- Polarization of the light.
- The characteristic matrix in the up-down direction of
propagation.
- The characteristic matrix in the down-up direction of
propagation.
- The optical coefficients (reflection coefficient, refraction
coefficient, reflectance and transmittance).
The stack is implemented as a list and contains parameters that
change in each layer. Each layer is a dictionary with
|
meisamhe/GPLshared
|
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/average_top_3_scores.py
|
Python
|
gpl-3.0
| 1,767
| 0.001132
|
import sys
import operator
import collections
import random
import string
import heapq
# @include
def
|
find_student_with_highest_best_of_three_scores(name_score_data):
student_scores = collections.defaultdict(list)
for line in name_score_data:
name, score = lin
|
e.split()
if len(student_scores[name]) < 3:
heapq.heappush(student_scores[name], int(score))
else:
heapq.heappushpop(student_scores[name], int(score))
return max([(sum(scores), name) for name, scores in student_scores.items()
if len(scores) == 3],
key=operator.itemgetter(0),
default='no such student')[1]
# @exclude
def rand_string(length):
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def simple_test():
with open('scores.txt', 'w') as ofs:
ofs.write('''adnan 100
amit 99
adnan 98
thl 90
adnan 10
amit 100
thl 99
thl 95
dd 100
dd 100
adnan 95''')
with open('scores.txt') as name_score_data:
result = find_student_with_highest_best_of_three_scores(name_score_data)
print('result =', result)
assert result == 'adnan'
def main():
simple_test()
n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 10000)
with open('scores.txt', 'w') as ofs:
for i in range(n):
test_num = random.randint(0, 20)
name = rand_string(random.randint(5, 10))
for _ in range(test_num):
print(name, random.randint(0, 100), file=ofs)
with open('scores.txt') as name_score_data:
name = find_student_with_highest_best_of_three_scores(name_score_data)
name_score_data.seek(0)
print('top student is', name)
if __name__ == '__main__':
main()
|
meantheory/dotfiles
|
dos/src/dos/config.py
|
Python
|
mit
| 2,504
| 0.000799
|
from pathlib import Path
import os
import structlog
log = structlog.get_logger()
_config = None
def get():
global _config
if not isinstance(_config, _build_config):
_config = _build_config()
return _config
class _build_config:
def __init__(self):
self._config = {}
self.dos_install_dir = os.environ["DOS_BIN"]
self.dos_log_dir = os.environ["DOS_LOG"]
self.env_var_contexts = ["dos"]
# load from toml file
self._load_toml_config()
# load from env variables
self._load_env_vars()
def get(self, key, default=None):
return self._config.get(key, N
|
one)
def put(self, key, value, context="default"):
self.add_config_value(key, value, context=context)
def check(self, key):
|
return key in self._config
def add_config_value(self, key, value, context="default"):
ctx_key = f"{context}_{key}"
self._config[ctx_key] = value
log.debug("set config", context=context, key=key, ctx_key=ctx_key)
def add_path_value(self, key, value, context):
self.add_config_value(key, Path(value), context=context)
def _load_toml_config(self):
# potentially add env var contexts
log.debug("loading toml config", file_name="TODO <> TODO")
def _load_env_vars(self):
log.debug("loading environ config")
for key in os.environ:
parts = key.lower().split("_")
ctx = parts[0]
if ctx not in self.env_var_contexts:
continue
log.info(f"discovered environ config", key=key)
if len(parts) == 2:
self.add_config_value(
parts[1], # key
os.environ[key], # value from env
context=ctx, # give context
)
elif len(parts) == 3:
k = parts[2]
t = parts[1]
if t == "path":
self.add_path_value(k, os.environ[key], context=ctx)
else:
raise ValueError(f'unrecognized key type "{t}" for "{key}"')
else:
                raise ValueError(
f"incorrect number of parts for env var: {key}, expected 2 or 3"
)
def dos_bin(self):
log.info(f"dev ops shell bin: {self.dos_install_dir}")
dos_bin = Path(self.dos_install_dir)
dos_bin.mkdir(parents=True, exist_ok=True)
return dos_bin
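# A minimal sketch of the environment-variable convention parsed above; the
# variable names and values are made up. Keys are lower-cased and split on "_":
# the first part must be a known context ("dos"), and an optional middle part
# "path" stores the value as a pathlib.Path.
def _example_env_config():
    os.environ.setdefault("DOS_BIN", "/tmp/dos/bin")   # required by _build_config
    os.environ.setdefault("DOS_LOG", "/tmp/dos/log")
    os.environ["DOS_EDITOR"] = "vim"            # stored under ctx_key "dos_editor"
    os.environ["DOS_PATH_CACHE"] = "/tmp/dos"   # stored as a Path under "dos_cache"
    cfg = get()
    return cfg.get("dos_editor"), cfg.get("dos_cache")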
|