repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
ctoher/pymatgen | pymatgen/core/composition.py | Python | mit | 31,215 | 0.000993 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
import collections
import numbers
import re
import string
import six
from six.moves import filter, map, zip
from fractions import Fraction
from functools import total_ordering
from monty.fractions import gcd
from pymatgen.core.periodic_table import get_el_sp, Element
from pymatgen.util.string_utils import formula_double_format
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.core.units import unitized
@total_ordering
class Composition(collections.Mapping, collections.Hashable, PMGSONable):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
Note that the key can be either an Element or a Specie. Elements and Specie
are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Specie and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Specie.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
It should also be noted that many Composition related fun | ctionality takes
in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Co | mposition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
"""
Tolerance in distinguishing different composition amounts.
1e-8 is fairly tight, but should cut out most floating point arithmetic
errors.
"""
amount_tolerance = 1e-8
"""
Special formula handling for peroxides and certain elements. This is so
that formula output does not write LiO instead of Li2O2 for example.
"""
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
def __init__(self, *args, **kwargs): #allow_negative=False
"""
Very flexible Composition construction, similar to the built-in Python
dict(). Also extended to allow simple string init.
Args:
Any form supported by the Python built-in dict() function.
1. A dict of either {Element/Specie: amount},
{string symbol:amount}, or {atomic number:amount} or any mixture
of these. E.g., {Element("Li"):2 ,Element("O"):1},
{"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition.
2. Keyword arg initialization, similar to a dict, e.g.,
Compostion(Li = 2, O = 1)
In addition, the Composition constructor also allows a single
string as an input formula. E.g., Composition("Li2O").
allow_negative: Whether to allow negative compositions. This
argument must be popped from the \*\*kwargs due to \*args
ambiguity.
"""
self.allow_negative = kwargs.pop('allow_negative', False)
# it's much faster to recognize a composition and use the elmap than
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]._elmap
elif len(args) == 1 and isinstance(args[0], six.string_types):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs)
self._elmap = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise CompositionError("Amounts in Composition cannot be "
"negative!")
if abs(v) >= Composition.amount_tolerance:
self._elmap[get_el_sp(k)] = v
self._natoms += abs(v)
def __getitem__(self, el):
"""
Get the amount for element.
"""
return self._elmap.get(get_el_sp(el), 0)
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
# in the elmap, so checking len enables us to only check one
# compositions elements
if len(self) != len(other):
return False
for el, v in self._elmap.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
"""
Defines >= for Compositions. Should ONLY be used for defining a sort
order (the behavior is probably not what you'd expect)
"""
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
elif self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Adds two compositions. For example, an Fe2O3 composition + an FeO
composition gives a Fe3O4 composition.
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
"""
Subtracts two compositions. For example, an Fe2O3 composition - an FeO
composition gives an FeO2 composition.
Raises:
CompositionError if the subtracted composition is greater than the
original composition in any of its elements, unless allow_negative
is True
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
"""
Multiply a Composition by an integer or a float.
Fe2O3 * 4 -> Fe8O12
"""
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self},
allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self},
allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between
Compositions with different elements.
"""
hashcode = 0
for el in self._elmap.keys():
hashcode += el.Z
return hashcode
def __contains__(self, el):
return el in self._elmap
def __len__(self):
return len(self._elmap)
def __iter__(self):
retu |
AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/run_command_document_base.py | Python | mit | 1,874 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See Li | cense.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
cla | ss RunCommandDocumentBase(Model):
"""Describes the properties of a Run Command metadata.
:param schema: The VM run command schema.
:type schema: str
:param id: The VM run command id.
:type id: str
:param os_type: The Operating System type. Possible values include:
'Windows', 'Linux'
:type os_type: str or
~azure.mgmt.compute.v2017_03_30.models.OperatingSystemTypes
:param label: The VM run command label.
:type label: str
:param description: The VM run command description.
:type description: str
"""
_validation = {
'schema': {'required': True},
'id': {'required': True},
'os_type': {'required': True},
'label': {'required': True},
'description': {'required': True},
}
_attribute_map = {
'schema': {'key': '$schema', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
'label': {'key': 'label', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, schema, id, os_type, label, description):
super(RunCommandDocumentBase, self).__init__()
self.schema = schema
self.id = id
self.os_type = os_type
self.label = label
self.description = description
|
nickcoutsos/thuum | thuum/tests/utils.py | Python | mit | 1,134 | 0.000882 | import mock
from tornado import (
httpclient,
testing,
web,
)
class Base(testing.AsyncHTTPTestCase):
def get_app(self):
class Nothing(web.RequestHandler):
def nothing(self):
pass
delete = get = head = options = post = put = nothing
return web.Application([(r"/.*", Nothing)])
def get_runner(self, runner_cls, client=None, make_request=None, **kwargs):
client = client or self.http_client
make_request = make_request or self.get_request
runner = runner_cls(client, make_request, **kwargs)
events = dict(
r | eady=mock.MagicMock(),
start=mock.MagicMock(),
finish=mock.MagicMock(),
)
runner.events.on("request | _ready", events["ready"])
runner.events.on("request_started", events["start"])
runner.events.on("request_finished", events["finish"])
return events, runner
def get_request(self, *args, **kwargs):
return httpclient.HTTPRequest(
self.get_url("/foo"),
"GET",
*args,
**kwargs
)
|
vincepandolfo/django | tests/auth_tests/test_models.py | Python | bsd-3-clause | 10,053 | 0.000995 | from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import get_hasher
from django.contrib.auth.models import (
AbstractUser, Group, Permission, User, UserManager,
)
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.db.models.signals import post_save
from django.test import TestCase, mock, override_settings
class NaturalKeysTestCase(TestCase):
def test_user_natural_key(self):
staff_user = User.objects.create_user(username='staff')
self.assertEqual(User.objects.get_by_natural_key('staff'), staff_user)
self.assertEqual(staff_user.natural_key(), ('staff',))
def test_group_natural_key(self):
users_group = Group.objects.create(name='users')
self.assertEqual(Group.objects.get_by_natural_key('users'), users_group)
class LoadDataWithoutNaturalKeysTestCase(TestCase):
fixtures = ['regular.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
class LoadDataWithNaturalKeysTestCase(TestCase):
fixtures = ['natural.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
class LoadDataWithNaturalKeysAndMultipleDatabasesTestCase(TestCase):
multi_db = True
def test_load_data_with_user_permissions(self):
# Create test contenttypes for both databases
default_objects = [
ContentType.objects.db_manager('default').create(
model='examplemodela',
app_label='app_a',
),
ContentType.objects.db_manager('default').create(
model='examplemodelb',
app_label='app_b',
),
]
other_objects = [
ContentType.objects.db_manager('other').create(
model='examplemodelb',
app_label='app_b',
),
ContentType.objects.db_manager('other').create(
model='examplemodela',
app_label='app_a',
| ),
| ]
# Now we create the test UserPermission
Permission.objects.db_manager("default").create(
name="Can delete example model b",
codename="delete_examplemodelb",
content_type=default_objects[1],
)
Permission.objects.db_manager("other").create(
name="Can delete example model b",
codename="delete_examplemodelb",
content_type=other_objects[0],
)
perm_default = Permission.objects.get_by_natural_key(
'delete_examplemodelb',
'app_b',
'examplemodelb',
)
perm_other = Permission.objects.db_manager('other').get_by_natural_key(
'delete_examplemodelb',
'app_b',
'examplemodelb',
)
self.assertEqual(perm_default.content_type_id, default_objects[1].id)
self.assertEqual(perm_other.content_type_id, other_objects[0].id)
class UserManagerTestCase(TestCase):
def test_create_user(self):
email_lowercase = 'normal@normal.com'
user = User.objects.create_user('user', email_lowercase)
self.assertEqual(user.email, email_lowercase)
self.assertEqual(user.username, 'user')
self.assertFalse(user.has_usable_password())
def test_create_user_email_domain_normalize_rfc3696(self):
# According to http://tools.ietf.org/html/rfc3696#section-3
# the "@" symbol can be part of the local part of an email address
returned = UserManager.normalize_email(r'Abc\@DEF@EXAMPLE.com')
self.assertEqual(returned, r'Abc\@DEF@example.com')
def test_create_user_email_domain_normalize(self):
returned = UserManager.normalize_email('normal@DOMAIN.COM')
self.assertEqual(returned, 'normal@domain.com')
def test_create_user_email_domain_normalize_with_whitespace(self):
returned = UserManager.normalize_email('email\ with_whitespace@D.COM')
self.assertEqual(returned, 'email\ with_whitespace@d.com')
def test_empty_username(self):
with self.assertRaisesMessage(ValueError, 'The given username must be set'):
User.objects.create_user(username='')
def test_create_user_is_staff(self):
email = 'normal@normal.com'
user = User.objects.create_user('user', email, is_staff=True)
self.assertEqual(user.email, email)
self.assertEqual(user.username, 'user')
self.assertTrue(user.is_staff)
def test_create_super_user_raises_error_on_false_is_superuser(self):
with self.assertRaisesMessage(ValueError, 'Superuser must have is_superuser=True.'):
User.objects.create_superuser(
username='test', email='test@test.com',
password='test', is_superuser=False,
)
def test_create_superuser_raises_error_on_false_is_staff(self):
with self.assertRaisesMessage(ValueError, 'Superuser must have is_staff=True.'):
User.objects.create_superuser(
username='test', email='test@test.com',
password='test', is_staff=False,
)
class AbstractUserTestCase(TestCase):
def test_email_user(self):
# valid send_mail parameters
kwargs = {
"fail_silently": False,
"auth_user": None,
"auth_password": None,
"connection": None,
"html_message": None,
}
abstract_user = AbstractUser(email='foo@bar.com')
abstract_user.email_user(subject="Subject here",
message="This is a message", from_email="from@domain.com", **kwargs)
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
# Verify that test email contains the correct attributes:
message = mail.outbox[0]
self.assertEqual(message.subject, "Subject here")
self.assertEqual(message.body, "This is a message")
self.assertEqual(message.from_email, "from@domain.com")
self.assertEqual(message.to, [abstract_user.email])
def test_last_login_default(self):
user1 = User.objects.create(username='user1')
self.assertIsNone(user1.last_login)
user2 = User.objects.create_user(username='user2')
self.assertIsNone(user2.last_login)
def test_user_double_save(self):
"""
Calling user.save() twice should trigger password_changed() once.
"""
user = User.objects.create_user(username='user', password='foo')
user.set_password('bar')
with mock.patch('django.contrib.auth.password_validation.password_changed') as pw_changed:
user.save()
self.assertEqual(pw_changed.call_count, 1)
user.save()
self.assertEqual(pw_changed.call_count, 1)
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
def test_check_password_upgrade(self):
"""
password_changed() shouldn't be called if User.check_password()
triggers a hash iteration upgrade.
"""
user = User.objects.create_user(username='user', password='foo')
initial_password = user.password
self.assertTrue(user.check_password('foo'))
hasher = get_hasher('default')
self.assertEqual('pbkdf2_sha256', hasher.algorithm)
old_iterations = hasher.iterations
try:
# Upgrade the password iterations
hasher.iterations = old_iterations + 1
with mock.patch('django.contrib.auth.password_validation.password_changed') as pw_changed:
user.check_password('foo')
self.assertEqual(pw_changed.call_count, 0)
self.assertNotEqual(initial_password, user.password)
finally:
hasher.iterations = old_iterations
class IsAct |
WeftWiki/phetools | hocr/hocr_request.py | Python | gpl-3.0 | 6,859 | 0.007581 | # -*- coding: utf-8 -*-
#
# @file prepare_request.py
#
# @remark Copyright 2014 Philippe Elie
# @remark Read the file COPYING
#
# @author Philippe Elie
import sys
import db
import os
sys.path.append(os.path.expanduser('~/wikisource'))
from ws_namespaces import index as index, namespaces as namespaces
import hocr
sys.path.append(os.path.expanduser('~/phe/jobs'))
import sge_jobs
import hashlib
import shutil
import MySQLdb
# FIXME: some other lang can be supported.
supported_lang = set(
[
'be',
'bn',
'ca',
'cs',
'da',
'de',
# 'de-f'
'en',
'eo',
'es',
'et',
'fr',
'he',
'hr',
'hu',
'id',
'is',
'it',
'la', # use ita as lang code atm
'no',
'pl',
'pt',
'ru',
'sv',
]
)
# FIXME: move that to db.py
def open_db(domain, family, cursor_class = None):
conn = db.create_conn(domain = domain, family = family)
cursor = db.use_db(conn, domain, family, cursor_class)
return conn, cursor
def close_db(conn, cursor):
if cursor:
cursor.close()
if conn:
conn.close()
def index_ns_nr(lang):
ns_name = index['wikisource'][lang]
ns_nr = namespaces['wikisource'][lang][ns_name]
return ns_nr
def add_hocr_request(lang, book, force = False):
job_req = {
'jobname' : 'hocr',
'run_cmd' : 'python',
'args' : [
os.path.expanduser('~/phe/hocr/hocr.py'),
'-lang:' + lang,
'-book:' + book
],
'max_vmem' : 2048,
}
if force:
job_req['force'] = True
db_obj = sge_jobs.DbJob()
db_obj.add_request(**job_req)
def fetch_file_sha1_db(lang, family, titles):
conn, cursor = open_db(lang, family, MySQLdb.cursors.DictCursor)
fmt_strs = ', '.join(['%s'] * len(titles))
q = 'SELECT img_name, img_sha1 FROM image WHERE img_name IN (%s)' % fmt_strs
cursor.execute(q, titles)
data = cursor.fetchall()
result = {}
for p in data:
result[p['img_name']] = "%040x" % int(p['img_sha1'], 36)
close_db(conn, cursor)
return result
def fetch_file_sha1_block(lang, titles):
result1 = fetch_file_sha1_db(lang, 'wikisource', titles)
commons_titles = [ f for f in titles if not f in result1 ]
result2 = fetch_file_sha1_db('commons', 'wiki', commons_titles)
result1.update(result2)
return result1
def fetch_file_sha1(lang, titles):
result = {}
for i in range(0, (len(titles) + 999) / 1000):
temp = fetch_file_sha1_block(lang, titles[i*1000:(i+1) * 1000])
result.update(temp)
return result
def prepare_request(db_hocr, lang):
ns_nr = index_ns_nr(lang)
conn, cursor = open_db(lang, 'wikisource')
q = 'SELECT page_title FROM page WHERE page_namespace=%s and page_is_redirect=0'
cursor.execute(q, ns_nr)
titles = [ x[0] for x in cursor.fetchall() if x[0].endswith('.djvu') or x[0].endswith('.pdf') ]
close_db(conn, cursor)
file_to_sha1 = fetch_file_sha1(lang, titles)
q = 'SELECT sha1, title FROM hocr WHERE lang=%s'
db_hocr.cursor.execute(q, (lang, ))
temp = db_hocr.cursor.fetchall()
hocr_sha1 = set()
for f in temp:
hocr_sha1.add(f['sha1'])
# attempt to rename or tidy can't work as we lost the old title.
# and sha1 is not unique in the db. Tidying unused hocr must be done
# separately.
for title in file_to_sha1:
if file_to_sha1[title] not in hocr_sha1:
print lang, title, file_to_sha1[title]
add_hocr_request(lang, title)
class DbHocr(db.UserDb):
def __init__(self):
super(DbHocr, self).__init__('hocr')
def add_update_row(self, title, lang, sha1):
sha1 = "%040x" % int(sha1, 16)
q = """
INSERT INTO hocr (title, lang, sha1) VALUES (%s, %s, %s)
ON DUPLICATE KEY UPDATE sha1=%s
"""
self.cursor.execute(q, [ title, lang, sha1, sha1 ])
def read_sha1(path):
fd = open(path + 'sha1.sum')
sha1 = fd.read()
fd.close()
return sha1
def rebuild_hocr_db(db_hocr, lang):
db_hocr.cursor.execute("DELETE FROM hocr")
db_hocr.conn.commit()
ns_nr = index_ns_nr(lang)
conn, cursor = open_db(lang, 'wikisource')
q = 'SELECT page_title FROM page WHERE page_namespace=%s and page_is_redirect=0'
cursor.execute(q, ns_nr)
for p in cursor.fetchall():
title = p[0]
if title.endswith('.djvu') or title.endswith('.pdf'):
path = new_cache_path(title, lang)
if os.path.exists(path + 'sha1.sum'):
sha1 = read_sha1(path)
db_hocr.add_update_row(title, lang, sha1)
db_hocr.conn.commit()
close_db(conn, cursor)
def bookname_md5(key):
h = hashlib.md5()
h.update(key)
return h.hexdigest()
def old_cache_path(book_name):
base_dir = os.path.expanduser('~/cache/hocr/') + '%s/%s/%s/'
h = bookname_md5(book_name)
return base_dir % (h[0:2], h[2:4], h[4:])
def new_cache_pathh(book_name, lang):
base_dir = os.path.expanduser('~/cache/hocr/') + '%s/%s/%s/'
h = bookname_md5(book_name + lang)
return base_dir % (h[0:2], h[2:4], h[4:])
def move_dir(title, count, lang):
| old = old_cache_path(title)
new = new_cache_path(title, lang)
if True:
print "echo " + str(count)
print "mkdir -p " + new
print "mv " + old + '* ' + new
print "rmdir -p --ignore-fail-on-non-empty " + old
else:
if not os.path.exists(new):
print "# | misssing data", new
elif not os.path.exists(new + "sha1.sum"):
print "#misssing sha1.sum data", new
if os.path.exists(old):
print "#old data", old
print "rm " + old + '*'
print "rmdir -p --ignore-fail-on-non-empty " + old
def move_tree(lang, count):
ns_nr = index_ns_nr(lang)
conn, cursor = open_db(lang, 'wikisource')
q = 'SELECT page_title FROM page WHERE page_namespace=%s and page_is_redirect=0'
cursor.execute(q, ns_nr)
for p in cursor.fetchall():
title = p[0]
if title.endswith('.djvu') or title.endswith('.pdf'):
count += 1
print >> sys.stderr, count, '\r',
move_dir(title, count, lang)
close_db(conn, cursor)
return count
if __name__ == "__main__":
arg = sys.argv[1]
count = 0
db_hocr = DbHocr()
db_hocr.open()
for lang in supported_lang:
if arg == '-rebuild_hocr_db':
rebuild_hocr_db(db_hocr, lang)
elif arg == '-prepare_request':
prepare_request(db_hocr, lang)
else:
print >> sys.stderr, "Unknown option:", arg
#count = move_tree(lang, count)
#count = prepare_request(lang, count)
db_hocr.close()
|
SwordYoung/cutprob | hackerrank/contest/w13/a-super-hero/a.py | Python | artistic-2.0 | 1,849 | 0.003786 | #!/usr/bin/env python
def run_test(n, m, power, bullet):
prev_dict = {}
cur_dict = {}
for i in xrange(n):
ri = n-1-i
for j in xrange(m):
if i == 0:
c | ur_dict[power[ri][j]] = power[ri][j]
else:
new_k = power[ri][j]
for k, v in prev_dict.items():
all_bullet = new_k + k - min(v, bullet[ri][j])
if cur_dict.has_key(all_bullet):
cur_dict[all_bullet] = min(new_k, cur_dict[all_bullet])
else:
| cur_dict[all_bullet] = new_k
prev_dict = {}
for c, t in cur_dict.items():
small = True
for c1, t1 in cur_dict.items():
if c1 < c and t1 < t:
small = False
break
if small:
prev_dict[c] = t
# print "%s" % (prev_dict)
cur_dict = {}
smallest = None
for t in prev_dict.keys():
if smallest is None or t < smallest:
smallest = t
print smallest
return smallest
def mtest1():
n = 3
m = 3
power = [[1, 2, 3], [3, 2, 1], [3, 2, 1]]
bullet = [[1, 2, 3], [3, 2, 1], [1, 2, 3]]
run_test(n, m, power, bullet)
def mtest2():
n = 3
m = 2
power = [[1, 8], [6, 1], [4, 6]]
bullet = [[2, 1], [4, 1], [3, 1]]
run_test(n, m, power, bullet)
def mtest3():
n = 3
m = 3
power = [[3, 2, 5], [8, 9, 1], [4, 7, 6]]
bullet = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
run_test(n, m, power, bullet)
def mtest3():
n = 3
m = 2
power = [[5, 10], [50, 60], [20, 25]]
bullet = [[5, 50], [5, 20], [1, 1]]
run_test(n, m, power, bullet)
def manual_test():
mtest1()
mtest2()
mtest3()
if __name__ == "__main__":
manual_test()
|
hjjeon0608/mbed_for_W7500P | workspace_tools/singletest.py | Python | apache-2.0 | 11,885 | 0.005721 | #!/usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
"""
File format example: test_spec.json:
{
"targets": {
"KL46Z": ["ARM", "GCC_ARM"],
"LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
"LPC11U24": ["uARM"],
"NRF51822": ["ARM"]
}
}
File format example: muts_all.json:
{
"1" : {"mcu": "LPC1768",
"port":"COM4",
"disk":"J:\\",
"peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
},
"2" : {"mcu": "KL25Z",
"port":"COM7",
"disk":"G:\\",
"peripherals": ["digital_loop", "port_loop", "analog_loop"]
}
}
"""
# Be sure that the tools directory is in the search path
import sys
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
# Check: Extra modules which are required by core test suite
from workspace_tools.utils import check_required_modules
check_required_modules(['prettytable', 'serial'])
# Imports related to mbed build api
from workspace_tools.build_api import mcu_toolchain_matrix
# Imports from TEST API
from workspace_tools.test_api import SingleTestRunner
from workspace_tools.test_api import singletest_in_cli_mode
from workspace_tools.test_api import detect_database_verbose
from workspace_tools.test_api import get_json_data_from_file
from workspace_tools.test_api import get_avail_tests_summary_table
from workspace_tools.test_api import get_default_test_options_parser
from workspace_tools.test_api import print_muts_configuration_from_json
from workspace_tools.test_api import print_test_configuration_from_json
from workspace_tools.test_api import get_autodetected_MUTS
from workspace_tools.test_api import get_autodetected_TEST_SPEC
from workspace_tools.test_api import get_module_avail
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
# Importing extra modules which can be not installed but if available they can extend test suite functionality
try:
import mbed_lstools
from workspace_tools.compliance.ioper_runner import IOperTestRunner
from workspace_tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
def get_version():
""" Returns test script version
"""
single_test_version_major = 1
single_test_version_minor = 5
return (single_test_version_major, single_test_version_minor)
if __name__ == '__main__':
# Command line options
parser = get_default_test_options_parser()
parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""
(opts, args) = parser.parse_args()
# Print scrip version
if opts.version:
print parser.description
print parser.epilog
print "Version %d.%d"% get_version()
exit(0)
if opts.db_url and opts.verbose_test_configuration_only:
detect_database_verbose(opts.db_url)
exit(0)
# Print summary / information about automation test status
if opts.test_automation_report:
print get_avail_tests_summary_table(platform_filter=opts.general_filter_regex)
exit(0)
# Print summary / information about automation test status
if opts.test_case_report:
test_case_report_cols = ['id',
'automated',
'description',
'peripherals',
'host_test',
'duration',
'source_dir']
print get_avail_tests_summary_table(cols=test_case_report_cols,
result_summary=False,
join_delim='\n',
platform_filter=opts.general_filter_regex)
exit(0)
# Only prints matrix of supported toolchains
if opts.supported_toolchains:
print mcu_toolchain_matrix(platform_filter=opts.general_filter_regex)
exit(0)
test_spec = None
MUTs = None
if hasattr(opts, 'auto_detect') and opts.auto_detect:
# If auto_detect attribute is present, we assume other auto-detection
# parameters like 'toolchains_filter' are also set.
print "MBEDLS: Detecting connected mbed-enabled devices... "
if get_module_avail('mbed_lstools'):
mbeds = mbed_lstools.create()
muts_list = mbeds.list_mbeds_ext() if hasattr(mbeds, 'list_mbeds_ext') else mbeds.list_mbeds()
for mut in muts_list:
print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['platform_name_unique'] if 'platform_name_unique' in mut else mut['platform_name'],
mut['serial_port'],
mut['mount_point'])
# Set up parameters for test specification filter function (we need to set toolchains per target here)
use_default_toolchain = 'default' in opts.toolchains_filter.split(',') if opts.toolchains_filter is not None el | se True
use_supported_toolchains = 'all' in opts.toolchains_filter.split(',') if opts.toolchains_filter is not None else False
toolchain_filter = opts.toolchains_filter
platform_name_filter = opts.general | _filter_regex.split(',') if opts.general_filter_regex is not None else opts.general_filter_regex
# Test specification with information about each target and associated toolchain
test_spec = get_autodetected_TEST_SPEC(muts_list,
use_default_toolchain=use_default_toolchain,
use_supported_toolchains=use_supported_toolchains,
toolchain_filter=toolchain_filter,
platform_name_filter=platform_name_filter)
# MUTs configuration auto-detection
MUTs = get_autodetected_MUTS(muts_list)
else:
# Open file with test specification
# test_spec_filename tells script which targets and their toolchain(s)
# should be covered by the test scenario
test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
if test_spec is None:
if not opts.test_spec_filename:
parser.print_help()
exit(-1)
# Get extra MUTs if applicable
MUTs = get_json_data_from_file(opts.muts_spec_filename) if opts.muts_spec_filename else None
if MUTs is None:
if not opts.muts_spec_filename:
parser.print_help()
exit(-1)
if opts.verbose_test_configuration_only:
print "MUTs configuration in %s:" % ('auto-detected' if opts.auto_detect else opts.muts_spec_filename)
if MUTs:
print print_muts_configuration_from_json(MUTs, platform_filter=opts.general_filter_regex)
print
print "Test specification in %s:" % ('auto-detected' if opts.auto_detect else opts.test_spec_filename)
if test_spec:
print print_test_configuration_from_json(test_spec)
exit(0)
if get_module_avail('mbed_lstools'):
if opts.operability_checks:
# Check if test scope is valid and run tests
test_scope = get_available_oper_test_scopes()
if opts.operability_checks in test_ |
pingortle/collate_hospitals_kludge | match_hospitals.py | Python | mit | 2,609 | 0.014565 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
from pprint import pprint
import re
from fileinput import input
import csv
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
articles = ['the', 'a']
prepositions = ['at', 'of']
hospital_words = ['hospital', 'medical', 'center']
word_exclusions = articles + prepositions + hospital_words
class FuzzyHospitals:
class Result:
def __init__(self, hospital, score):
self.hospital = hospital
self.score = score
def __init__(self, hospitals):
self._hospitals = list(filter(lambda x: x.name, hospitals))
self._name_cache = list(map(lambda x: x.name, self._hospitals))
self._name_dict = {hospital.name: hospital for hospital in self._hospitals}
def match(self, name):
normal_name = normalize_hospital_name(name)
result = process.extract(normal_name, self._name_cache, limit = 1)
name, score = None, 0
if len(result) == 1:
name, score = result[0]
return FuzzyHospitals.Result(self._name_dict[name] if name else Hospital("No Match", "No Match"), score)
class Hospital:
    """A hospital record: raw name, normalized name, and associated payload."""

    def __init__(self, name, data):
        self.original_name = name  # name exactly as it appeared in the input
        self.name = normalize_hospital_name(name)  # canonical form used for matching
        self.data = data
def normalize_hospital_name(name):
    """Lower-case *name*, strip punctuation/digits, and drop filler words.

    Hyphens become spaces, everything outside [a-z ] is removed, and words
    in ``word_exclusions`` (articles, prepositions, generic hospital words)
    are filtered out.  (Repaired: the original contained a stray ' | '
    token that broke the expression.)
    """
    cleaned = re.sub(
        "[^abcdefghijklmnopqrstuvwxyz ]",
        "",
        name.casefold().replace("-", " "))
    kept = [word for word in cleaned.split() if word not in word_exclusions]
    return " ".join(kept)
def fetch_hospitals(lines):
    """Extract the last double-quoted field from each line.

    Lines containing no quoted field are skipped (the original raised
    IndexError on them via ``[-1]``), and empty quoted fields are dropped.
    Also repairs a stray ' | ' token that split ``strip`` in the original.
    """
    names = []
    for line in lines:
        quoted = re.findall(r"\".*?\"", line)
        if not quoted:
            continue  # no quoted field on this line; nothing to extract
        name = quoted[-1].strip('"')
        if name:
            names.append(name)
    return names
def extract_hospital(line):
    """Parse a whitespace-delimited line into a Hospital.

    The first token is discarded, the final two tokens form the data
    field, and everything in between is the hospital name.
    """
    tokens = line.split()
    name = " ".join(tokens[1:-2])
    data = " ".join(tokens[-2:])
    return Hospital(name, data)
def fetch_hospital_data(lines):
    """Build a Hospital for every input line."""
    return list(map(extract_hospital, lines))
def write_table_to_file(filename, table):
    """Write *table* (an iterable of rows) to *filename* as CSV.

    ``newline=''`` is required by the csv module docs for files passed to
    csv.writer; without it, platforms that translate '\\n' produce doubled
    line endings.
    """
    with open(filename, 'w', newline='') as handle:
        csv.writer(handle).writerows(table)
def match_files(file_a, file_b, outfile):
    """Fuzzy-match hospital names from *file_a* against records in *file_b*.

    Both inputs are read via fileinput.input(); the first line of each
    (assumed to be a header) is skipped.  Writes (query name, matched
    name, matched data) rows to *outfile* as CSV.
    """
    # [1:] drops the header line of each input file.
    hospitals = fetch_hospitals([line for line in input(file_a)][1:])
    hospital_data = FuzzyHospitals(fetch_hospital_data([line for line in input(file_b)][1:]))
    output_table = []
    for hospital in hospitals:
        match = hospital_data.match(hospital)
        output_table.append((hospital, match.hospital.original_name, match.hospital.data))
        pprint(output_table[-1])  # progress feedback on stdout
    write_table_to_file(outfile, output_table)
######## Main ########
if __name__ == '__main__':
    from sys import argv
    if len(argv) >= 4:
        match_files(argv[1], argv[2], argv[3])
    else:
        # Fixed: the original 'else' was missing its colon (SyntaxError).
        print("Invalid number of arguments. Please pass FileA, FileB, and the name of the output file respectively.")
|
airbnb/streamalert | tests/unit/streamalert/shared/test_alert.py | Python | apache-2.0 | 20,111 | 0.001044 | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
from datetime import datetime, timedelta
import json
from nose. | tools import (
assert_equal,
assert_false,
assert_is_instance,
assert_not_in,
assert_raises,
assert_true
)
from streamalert.shared.alert import Alert, AlertCreationError
class TestAlert:
"""Test shared Alert class."""
# pylint: disable=no-self-use,protected-access,too-many-public-methods
@staticmethod
def _basic_alert():
retur | n Alert('test_rule', {'abc': 123}, {'aws-firehose:alerts', 'aws-sns:test-output'})
    @staticmethod
    def _customized_alert():
        # Fully-populated Alert fixture exercising every optional keyword,
        # including the merge configuration (merge_by_keys / merge_window)
        # used by the can_merge / remaining_outputs tests below.
        return Alert(
            'test_rule',
            {'abc': 123},
            {'aws-firehose:alerts', 'aws-sns:test-output', 'aws-s3:other-output'},
            alert_id='abc-123',
            attempts=1,
            cluster='',
            context={'rule': 'context'},
            created=datetime.utcnow(),
            dispatched=datetime.utcnow(),
            log_source='source',
            log_type='csv',
            merge_by_keys=['abc'],
            merge_window=timedelta(minutes=5),
            outputs_sent={'aws-sns:test-output'},
            rule_description='A Test Rule',
            source_entity='entity',
            source_service='s3',
            staged=True
        )
def test_alert_encoder_invalid_json(self):
"""Alert Class - Alert Encoder - Invalid JSON raises parent exception"""
assert_raises(TypeError, json.dumps, RuntimeWarning, default=list)
def test_init_invalid_kwargs(self):
"""Alert Class - Init With Invalid Kwargs"""
assert_raises(AlertCreationError, Alert, '', {}, set(), cluster='test', invalid='nonsense')
def test_ordering(self):
"""Alert Class - Alerts Are Sorted By Creation"""
alerts = [self._basic_alert() for _ in range(5)]
assert_equal(alerts, sorted([alerts[0], alerts[3], alerts[1], alerts[4], alerts[2]]))
def test_repr(self):
"""Alert Class - Complete Alert Representation"""
assert_is_instance(repr(self._basic_alert()), str)
assert_is_instance(repr(self._customized_alert()), str)
def test_str(self):
"""Alert Class - To String"""
alert = self._customized_alert()
assert_equal('<Alert abc-123 triggered from test_rule>', str(alert))
def test_dynamo_key(self):
"""Alert Class - Dynamo Key"""
alert = self._customized_alert()
assert_equal({'RuleName': 'test_rule', 'AlertID': 'abc-123'}, alert.dynamo_key)
def test_remaining_outputs_merge_disabled(self):
"""Alert Class - Remaining Outputs - No Merge Information"""
alert = self._basic_alert()
assert_equal(alert.outputs, alert.remaining_outputs)
# One output sent successfully
alert.outputs_sent = {'aws-sns:test-output'}
assert_equal({'aws-firehose:alerts'}, alert.remaining_outputs)
# All outputs sent successfully
alert.outputs_sent = {'aws-firehose:alerts', 'aws-sns:test-output'}
assert_equal(set(), alert.remaining_outputs)
def test_remaining_outputs_merge_enabled(self):
"""Alert Class - Remaining Outputs - With Merge Config"""
# Only the required firehose output shows as remaining
assert_equal({'aws-firehose:alerts'}, self._customized_alert().remaining_outputs)
def test_dynamo_record(self):
"""Alert Class - Dynamo Record"""
# Make sure there are no empty strings nor sets (not allowed in Dynamo)
alert = Alert(
'test_rule', {}, {'aws-sns:test-output'},
cluster='',
created='',
log_source='',
log_type='',
outputs_sent=set(),
rule_description='',
source_entity='',
source_service=''
)
record = alert.dynamo_record()
assert_not_in('', list(record.values()))
assert_not_in(set(), list(record.values()))
def test_create_from_dynamo_record(self):
"""Alert Class - Create Alert from Dynamo Record"""
alert = self._customized_alert()
# Converting to a Dynamo record and back again should result in the exact same alert
record = alert.dynamo_record()
new_alert = Alert.create_from_dynamo_record(record)
assert_equal(alert.dynamo_record(), new_alert.dynamo_record())
def test_create_from_dynamo_record_invalid(self):
"""Alert Class - AlertCreationError raised for an invalid Dynamo Record"""
assert_raises(AlertCreationError, Alert.create_from_dynamo_record, {})
def test_output_dict(self):
"""Alert Class - Output Dict"""
alert = self._basic_alert()
result = alert.output_dict()
# Ensure result is JSON-serializable (no sets)
assert_is_instance(json.dumps(result), str)
# Ensure result is Athena compatible (no None values)
assert_not_in(None, list(result.values()))
def test_can_merge_no_config(self):
"""Alert Class - Can Merge - False if Either Alert Does Not Have Merge Config"""
assert_false(self._basic_alert().can_merge(self._customized_alert()))
assert_false(self._customized_alert().can_merge(self._basic_alert()))
def test_can_merge_too_far_apart(self):
"""Alert Class - Can Merge - False if Outside Merge Window"""
alert1 = Alert(
'', {'key': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=0),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=11),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_different_merge_keys(self):
"""Alert Class - Can Merge - False if Different Merge Keys Defined"""
alert1 = Alert(
'', {'key': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': True}, set(),
merge_by_keys=['other'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_key_not_common(self):
"""Alert Class - Can Merge - False if Merge Key Not Present in Both Records"""
alert1 = Alert(
'', {'key': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'other': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_different_values(self):
"""Alert Class - Can Merge - False if Merge Key has Different Values"""
alert1 = Alert(
'', {'key': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': False}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_merge_keys_absent(self):
"""Alert Class - Can Merge - True if Merge Keys Do Not Exist in Either Recor |
shapiromatron/amy | workshops/management/commands/export_airports.py | Python | mit | 319 | 0.003135 | import yam | l
from django.core.management.base import BaseCommand, CommandError
from workshops.views import _export_instructors
class Command(BaseCommand):
    """Management command that dumps the instructor data as YAML."""

    args = 'no arguments'
    help = 'Display YAML for airports.'

    def handle(self, *args, **options):
        # Serialize to YAML and strip the trailing newline before printing.
        dump = yaml.dump(_export_instructors())
        print(dump.rstrip())
|
wavesoft/LiveQ | liveq-jobmanager/jobmanager/io/teamqueue.py | Python | gpl-2.0 | 1,361 | 0.008082 | ################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNE | SS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
import sys
import time
import logging
import jobmanager.io.agents as agents
import jobmanager.io.jobs as jobs
from jobmanager.config im | port Config
from peewee import fn
from liveq.models import Agent, AgentGroup, Jobs
# Setup logger
logger = logging.getLogger("teamqueue")
def processTeamQueue():
    """Check for and schedule jobs pending for the particular team.

    Intended to be called periodically; the scheduling logic itself is
    not implemented yet (stub).
    """
    pass
|
class Printer:
    """Accumulates SVG fragments and renders them inside an <svg> root.

    (Repaired: the class header line was fused with dataset metadata.)
    """

    def __init__(self):
        self._output = ''

    def print_output(self, output):
        # Append a raw SVG fragment to the accumulated document body.
        self._output += output

    def print_line(self, x1, y1, x2, y2, color=0, width=1):
        self.print_output(_svg_line(x1, y1, x2, y2, color=color, width=width))

    def print_circle(self, x, y, r, color=0, width=1, border_color=0):
        self.print_output(_svg_circle(x, y, r, color=color, width=width, border_color=border_color))

    def print_square(self, x, y, a, color=0, width=1, border_color=0):
        # A square is a rectangle with equal sides.
        self.print_output(_svg_rectangle(x, y, a, a, color=color, width=width, border_color=border_color))

    def print_text(self, x, y, text, color=0, font_size=12):
        self.print_output(_svg_text(x, y, text, color=color, font_size=font_size))

    def to_file(self, filename):
        # Serialize the whole document to *filename*.
        with open(filename, 'w') as f:
            f.write(str(self))

    def __str__(self):
        return """<svg width="100%" height="100%" version="1.1" xmlns="http://www.w3.org/2000/svg">
    {}
    </svg>
    """.format(self._output)
def _svg_line(x1, y1, x2, y2, color, width):
    """Render an SVG <line> element with round caps."""
    stroke = _svg_color(color)
    return ('<line x1="{}" y1="{}" x2="{}" y2="{}" '
            'style="stroke-linecap:round;stroke:{};stroke-width:{};" />\n').format(x1, y1, x2, y2, stroke, width)
def _svg_circle(x, y, r, color, width, border_color):
    """Render an SVG <circle> at (x, y) with radius *r*.

    *color* fills the circle; *border_color* strokes its outline with the
    given *width*.
    """
    color = _svg_color(color)
    # Bug fix: the original passed *color* here, so the border_color
    # parameter was silently ignored (cf. _svg_rectangle, which converts
    # border_color correctly).
    border_color = _svg_color(border_color)
    return '<circle cx="{}" cy="{}" r="{}" style="fill:{}; stroke:{}; stroke-width:{};" />\n'.format(x, y, r, color, border_color, width)
def _svg_rectangle(x, y, a, b, color, width, border_color):
    """Render an SVG <rect> of size a x b at (x, y)."""
    fill = _svg_color(color)
    stroke = _svg_color(border_color)
    template = '<rect x="{}" y="{}" width="{}" height="{}" style="fill:{}; stroke:{}; stroke-width:{};" />\n'
    return template.format(x, y, a, b, fill, stroke, width)
def _svg_text(x, y, text, color, font_size):
    """Render an SVG <text> element in the Nimbus Sans L face."""
    fill = _svg_color(color)
    template = '<text x="{}" y="{}" font-family="Nimbus Sans L" font-size="{}" fill="{}">{}</text>\n'
    return template.format(x, y, font_size, fill, text)
def _svg_color(color):
if isinstance(color, str):
return color
return 'rgb({}, {}, {})'.format(color, color, color)
|
akaszynski/vtkInterface | tests/test_init.py | Python | mit | 1,317 | 0.001519 | import os
import sys
import pytest
import vtk
import pyvista
developer_note = """
vtk has been directly imported in vtk>=9
Please see:
https://github.com/pyvista/pyvista/pull/1163
"""
@pytest.mark.skipif(not pyvista._vtk.VTK9,
                    reason='``vtk`` can be loaded directly on vtk<9')
def test_vtk_not_loaded():
    """Verify the ``vtk`` module isn't imported as a side effect on vtk>=9.

    We use ``os.system`` because we need to test the import of pyvista
    outside of the pytest unit test framework, as pytest itself loads vtk.
    """
    # Spawn a fresh interpreter so pytest's own imports cannot pollute
    # sys.modules before pyvista is imported.
    exe_str = "import pyvista; import sys; assert 'vtk' not in sys.modules"
    # anything other than 0 indicates an error
    assert not os.system(f'{sys.executable} -c "{exe_str}"'), developer_note
# validate all lazy loads
# (Repaired: 'vtkPlot3DMetaReader' contained a stray ' | ' token.)
lazy_readers = ['vtkGL2PSExporter',
                'vtkFacetReader',
                'vtkPDataSetReader',
                'vtkMultiBlockPLOT3DReader',
                'vtkPlot3DMetaReader']
if pyvista._vtk.VTK9:
    # vtkSegYReader is only wrapped lazily for VTK >= 9.
    lazy_readers.append('vtkSegYReader')


@pytest.mark.parametrize("cls_", lazy_readers)
def test_lazy_loads(cls_):
    """Each pyvista lazy_<name> factory must build the same VTK class."""
    lazy_class = getattr(pyvista._vtk, 'lazy_' + cls_)()
    actual_class = getattr(vtk, cls_)()
    # can't use isinstance here because these are classes
    assert type(lazy_class) == type(actual_class)
|
vinta/sublimall-server | sublimall/donations/models.py | Python | mit | 1,788 | 0 | # -*- coding: utf-8 -*-
from __future__ import division
import stripe
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from ..accounts.models import Member
if hasattr(settings, 'STRIPE_SECRET_KEY'):
stripe.api_key = settings.STRIPE_SECRET_KEY
class Donation(models.Model):
    """A one-off donation, charged through Stripe or recorded from Paypal.

    (Repaired: the original contained stray ' | ' tokens inside the
    get_provider definition and its call site that broke the syntax.)
    """

    # NOTE(review): ForeignKey without on_delete is Django < 2.0 style,
    # matching the rest of this project.
    member = models.ForeignKey(Member, blank=True, null=True)
    email = models.EmailField(blank=True, null=True)
    amount = models.IntegerField()  # amount in cents
    token_id = models.CharField(max_length=50)
    charge_id = models.CharField(max_length=50, blank=True, null=True)
    paid = models.BooleanField(default=False)
    date = models.DateTimeField(auto_now_add=True)

    def clean(self, *args, **kwargs):
        # A donation must be attributable to somebody: a member or an email.
        if not self.member and not self.email:
            raise ValidationError('Need an email or member')
        super(Donation, self).clean(*args, **kwargs)

    def get_email(self):
        """Return the linked member's email, else the raw email field."""
        return self.member.email if self.member else self.email

    def get_formatted_amount(self):
        # Stored in cents; render in currency units.
        return self.amount / 100

    def charge(self):
        """Charge the stored Stripe token and persist the outcome."""
        c = stripe.Charge.create(
            amount=self.amount,
            currency="eur",
            card=self.token_id,
            description="Charge for %s" % self.get_email())
        self.charge_id = c.id
        self.paid = c.paid
        self.save()

    def get_provider(self):
        # Stripe tokens are prefixed 'tok_'; anything else came from Paypal.
        return 'Stripe' if self.token_id.startswith('tok_') else 'Paypal'

    def get_payment_url(self):
        """Back-office URL for inspecting this payment at its provider."""
        if self.get_provider().lower() == 'paypal':
            return 'https://www.paypal.com/fr/vst/id=%s' % self.token_id
        return 'https://manage.stripe.com/payments/%s' % self.charge_id
|
def straight_line_log_likelihood(theta, x, y, sigmay):
    '''
    Returns the log-likelihood of drawing data values *y* at
    known values *x* given Gaussian measurement noise with standard
    deviation with known *sigmay*, where the "true" y values are
    *y_t = m * x + b*

    x: list of x coordinates
    y: list of y coordinates
    sigmay: list of y uncertainties
    m: scalar slope
    b: scalar line intercept

    Returns: scalar log likelihood

    (Repaired: the chi-squared expression contained a stray ' | ' token
    that broke the syntax.)
    '''
    m, b = theta
    # Gaussian log-likelihood: normalization term plus chi-squared term.
    residuals = y - (m * x + b)
    norm_term = np.sum(np.log(1. / (np.sqrt(2. * np.pi) * sigmay)))
    chi2_term = np.sum(-0.5 * residuals ** 2 / sigmay ** 2)
    return norm_term + chi2_term
|
knupouls/extremefeedbacklamp | jenkins_discovery.py | Python | mit | 1,818 | 0.035204 | #!/usr/bin/env python
# Jenkins server UDP based discovery
# Based on original work by Gordon McGregor gordon.mcgregor@verilab.com
#
# Author Aske Olsso | n aske.olsson@switch-gears.dk
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from twisted.application.internet import MulticastServer
from twist | ed.internet import task
import xml.etree.ElementTree as ET
MULTICAST_ADDR = "239.77.124.213"
UDP_PORT = 33848
DELAY = 60
class JenkinsDiscovery(DatagramProtocol):
    """UDP multicast client that discovers Jenkins/Hudson masters.

    Jenkins answers the discovery datagram with a small XML document
    containing its URL; discovered instances accumulate in a dict of
    {url: source-ip}.  (Python 2 / Twisted code.)
    """
    def __init__(self):
        # Map of discovered instance URL -> responding IP address.
        self.instances = {}
        # Payload content is irrelevant; any datagram triggers a reply.
        self.ping_str = 'Hello Jenkins, Where are you'
    def startProtocol(self):
        # print 'Host discovery: listening'
        # Join the Jenkins discovery multicast group so replies reach us.
        self.transport.joinGroup(MULTICAST_ADDR)
    def refreshList(self):
        # print 'Refreshing list...'
        # Forget everything discovered so far and probe again
        # (called periodically by the LoopingCall in main).
        self.instances = {}
        self.ping()
    def ping(self):
        # Broadcast the discovery datagram to the multicast group.
        self.transport.write(self.ping_str, (MULTICAST_ADDR, UDP_PORT))
    def datagramReceived(self, datagram, address):
        # print datagram
        try:
            xml = str.lower(datagram)
            root = ET.fromstring(xml)
            # Check if we received a datagram from another Jenkins/Hudson instance
            if root.tag == 'hudson' or root.tag == 'jenkins':
                for url in root.findall('url'):
                    # print "Jenkins url:", url.text
                    if not url.text in self.instances:
                        self.instances[url.text] = address[0]
                    # print "Jenkins IP:", address[0]
                print "Found instances:"
                for k,v in self.instances.iteritems():
                    print "%s Running @ %s" %(k,v)
        except:
            # Twisted and xml parser seems to through some Unhandled error
            pass
if __name__ == '__main__':
    # Listen on the Jenkins discovery multicast port and refresh the
    # instance list every DELAY seconds until interrupted.
    discovery = JenkinsDiscovery()
    reactor.listenMulticast(UDP_PORT, discovery)
    refresh = task.LoopingCall(discovery.refreshList)
    refresh.start(DELAY)
    reactor.run()
|
phorust/howmanygiven | howmanyapp/views.py | Python | mit | 982 | 0.01222 | # what each page will look like
from howmanyapp import app
from flask import render_template, flash, redirect, request
from howmany import *
from random import choice
from urlparse import urlparse
@app.route('/')
@app.route('/index.html')
def index():
    # Landing page; static template, no analysis performed.
    return render_template("index.html")
@app.route('/fuck/<path>')
def countf(path):
print "PATH", path
if urlparse(path).scheme == '':
path | = "http://" + path
return render_template("fquery.html",
count=count_in_page(path, 'fuck'),
queryurl=path)
@app.route('/<path:path>')
def analyze(path):
    # Catch-all route: run the full word analysis on the URL given in the path.
    print "PATH", path
    # Prepend a scheme when the caller omitted one.
    if urlparse(path).scheme == '':
        path = "http://" + path
    return render_template("query.html",
            queryname=cleanQueryName(path),
            data=analyze_page(path))
@app.errorhandler(404)
def page_not_found(e):
    """Render the 404 page with a randomly chosen exclamation.

    (Repaired: a stray ' | ' token inside the 'Aw shucks.' literal broke
    the list syntax.)
    """
    msgs = ['Oops!', 'Doh!', 'Oh no!', 'Aw shucks.', 'Golly.', 'Damn']
    return render_template("404.html",
            msg=choice(msgs)), 404
itaymendel/taurus | tests/modules/test_functionalAggregator.py | Python | apache-2.0 | 3,428 | 0.004959 | from bzt.modules.functional import FunctionalAggregator, FunctionalAggregatorListener, FunctionalSample
from tests import BZTestCase
from tests.mocks import MockFunctionalReader
class MockListener(FunctionalAggregatorListener):
    """Listener stub that records every aggregated result it receives."""

    def __init__(self):
        self.results = []

    def aggregated_results(self, result, cumulative_results):
        # Only the per-interval result is retained; cumulative is ignored.
        self.results.append(result)
class TestFunctionalAggregator(BZTestCase):
    """Test shared FunctionalAggregator class."""
    # pylint: disable=no-self-use

    @staticmethod
    def _sample(case, suite, status, start, error=None):
        # Shorthand for a FunctionalSample with 1s duration and no
        # trace/extras; removes the heavy repetition of the original.
        return FunctionalSample(test_case=case, test_suite=suite, status=status,
                                start_time=start, duration=1,
                                error_msg=error, error_trace=None, extras=None)

    def get_reader(self):
        """Build a mock reader with a fixed stream of samples.

        (Repaired: one 'test2' literal contained a stray ' | ' token.)
        """
        mock = MockFunctionalReader()
        mock.data = [
            self._sample("test1", "Tests1", "PASSED", 1),
            self._sample("test2", "Tests1", "BROKEN", 2, "Something broke"),
            self._sample("test3", "Tests2", "PASSED", 2),
            self._sample("test2", "Tests1", "FAILED", 3, "Something failed"),
            self._sample("test1", "Tests1", "SKIPPED", 3, "Disabled by user"),
            self._sample("test3", "Tests2", "PASSED", 4),
            self._sample("test1", "Tests1", "BROKEN", 4, "Broken"),
            self._sample("test1", "Tests1", "PASSED", 5),
            self._sample("test2", "Tests1", "PASSED", 4),
            self._sample("test3", "Tests2", "FAILED", 6, "Really failed"),
            self._sample("test1", "Tests1", "PASSED", 6),
        ]
        return mock

    def test_aggregation(self):
        """All samples end up grouped under their test suites."""
        obj = FunctionalAggregator()
        obj.prepare()
        obj.add_underling(self.get_reader())
        obj.process_readers()
        tree = obj.cumulative_results
        self.assertEqual({"Tests2", "Tests1"}, set(tree.test_suites()))
        self.assertEqual(len(tree.test_cases("Tests1")), 8)
        self.assertEqual(len(tree.test_cases("Tests2")), 3)
        obj.post_process()

    def test_listeners(self):
        """A registered listener receives exactly one aggregated result."""
        listener = MockListener()
        obj = FunctionalAggregator()
        obj.prepare()
        obj.add_underling(self.get_reader())
        obj.add_listener(listener)
        obj.check()
        obj.post_process()
        self.assertEqual(len(listener.results), 1)
|
Netflix/lemur | lemur/authorizations/models.py | Python | apache-2.0 | 1,090 | 0.000917 | """
.. module: lemur.authorizations.models
:platform: unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Netflix Secop | s <secops@netflix.com>
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy_utils import JSONType
from lemur.database import db
from lemur.plugins.base import plugins
class Authorization(db.Model):
    """Pending DNS authorization awaiting completion.

    (Repaired: the class statement contained a stray ' | ' token that
    broke the syntax.)
    """
    __tablename__ = "pending_dns_authorizations"
    id = Column(Integer, primary_key=True, autoincrement=True)
    account_number = Column(String(128))
    domains = Column(JSONType)
    dns_provider_type = Column(String(128))
    options = Column(JSONType)

    @property
    def plugin(self):
        # NOTE(review): reads self.plugin_name, which is not a column
        # declared here -- presumably assigned elsewhere; confirm before use.
        return plugins.get(self.plugin_name)

    def __repr__(self):
        return "Authorization(id={id})".format(id=self.id)

    def __init__(self, account_number, domains, dns_provider_type, options=None):
        self.account_number = account_number
        self.domains = domains
        self.dns_provider_type = dns_provider_type
        self.options = options
|
tijptjik/thegodsproject | plugins/linker/linker.py | Python | mit | 4,995 | 0.004404 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import re
from six.moves.urllib.parse import urlparse, urlunparse
from pelican import signals, contents
from linker import content_objects
logger = logging.getLogger("linker")
class Link(object):
    """Represents an HTML link including a linker command.

    Typically, the Link is constructed from an SRE_Match after applying the
    provided Link.regex pattern to the HTML content of a content object.
    """

    # regex based on the one used in contents.py from pelican version 3.6.3
    # (re.X: whitespace/comments inside the pattern are ignored)
    regex = re.compile(
        r""" # EXAMPLE: <a rel="nofollow" href="{mailto}webmaster"
        (?P<markup><\s*[^\>]* # <a rel="nofollow" href= --> markup
        (?:href|src|poster|data|cite|formaction|action)\s*=)
        (?P<quote>["\']) # " --> quote
        \{(?P<cmd>.*?)\} # {mailto} --> cmd
        (?P<url>.*?) # webmaster --> __url (see path)
        \2 # " <-- quote
        """, re.X)

    def __init__(self, context, content_object, match):
        """Construct a Link from an SRE_Match.

        :param context: The shared context between generators.
        :param content_object: The associated pelican.contents.Content.
        :param match: An SRE_Match obtained by applying the regex to my content.
        """
        self.context = context
        self.content_object = content_object
        self.markup = match.group('markup')
        self.quote = match.group('quote')
        self.cmd = match.group('cmd')
        # The URL is parsed once; only .path is exposed for linkers to edit.
        self.__url = urlparse(match.group('url'))
        self.path = self.__url.path

    def href(self): # rebuild matched URL using (possibly updated) self.path
        return urlunparse( self.__url._replace(path=self.path) )

    def html_code(self): # rebuild matched pattern from (possibly updated) self
        return ''.join((self.markup, self.quote, self.href(), self.quote))
class LinkerBase(object):
    """Base class for performing the linker command magic.

    To provide the linker command 'foo' (as in '<a href="{foo}contact'),
    a responsible subclass (e.g. FooLinker) sets its ``commands`` to
    ['foo'] and implements ``link(Link)``, which is invoked when the
    command is processed.
    """

    commands = []  # link commands handled by the Linker. EXAMPLE: ['mailto']
    builtins = ['filename', 'attach', 'category', 'tag', 'author', 'index']

    def __init__(self, settings):
        self.settings = settings

    def link(self, link):
        raise NotImplementedError
class Linkers(object):
    """Interface for all Linkers.

    Holds a mapping {cmd1: linker1, cmd2: linker2} and applies any
    registered linker command by passing the Link to the responsible
    Linker.  (Idea based on pelican.readers.Readers, but with less
    customization options.)

    (Repaired: the handle_links_in_content_object method name contained
    a stray ' | ' token that broke the syntax.)
    """

    def __init__(self, settings):
        self.settings = settings
        self.linkers = {}
        # Register LinkerBase itself plus every direct subclass.
        for linker_class in [LinkerBase] + LinkerBase.__subclasses__():
            for cmd in linker_class.commands:
                self.register_linker(cmd, linker_class)

    def register_linker(self, cmd, linker_class):
        """Bind *cmd* to *linker_class*, warning when a command is stolen."""
        if cmd in self.linkers:  # check for existing registration of that cmd
            current_linker_class = self.linkers[cmd].__class__
            logger.warning(
                "%s is stealing the linker command %s from %s.",
                linker_class.__name__, cmd, current_linker_class.__name__
            )
        self.linkers[cmd] = linker_class(self.settings)

    def handle_links_in_content_object(self, context, content_object):
        """Rewrite every linker-command link inside one content object."""

        # replace Link matches (with side effects on content and content_object)
        def replace_link_match(match):
            link = Link(context, content_object, match)
            if link.cmd in LinkerBase.builtins:
                pass  # builtin commands not handled here
            elif link.cmd in self.linkers:
                self.linkers[link.cmd].link(link)  # let Linker process the Link
            else:
                logger.warning("Ignoring unknown linker command %s", link.cmd)
            return link.html_code()  # return HTML to replace the matched link

        content_object._content = Link.regex.sub(  # match, process and replace
            replace_link_match, content_object._content)
def feed_context_to_linkers(generators):
    """Signal handler: run all Linkers over every generated content object."""
    settings = generators[0].settings
    context = generators[0].context
    linkers = Linkers(settings)
    for content_object in context['content_objects']:  # provided by plugin 'content_objects'
        if isinstance(content_object, contents.Static):
            continue  # static files carry no HTML to rewrite
        if not content_object._content:
            continue  # nothing rendered for this object
        linkers.handle_links_in_content_object(context, content_object)
def register():
    # Wire up the plugin: ensure content_objects is active, then hook in
    # after all generators have produced their shared context.
    content_objects.register()
    signals.all_generators_finalized.connect(feed_context_to_linkers)
|
JudoWill/glue | glue/core/registry.py | Python | bsd-3-clause | 2,587 | 0 | from __future__ import absolute_import, division, print_function
from collections import defaultdict
from functools import wraps
from .decorators import singleton
from .util import disambiguate
@singleton
class Registry(object):
    """ Stores labels for classes of objects. Ensures uniqueness

    The registry ensures that labels for objects of the same "group"
    are unique, and disambiguates as necessary. By default,
    object types are used to group, but anything can be used as a group

    Registry is a singleton, and thus all instances of Registry
    share the same information

    Usage:

        >>> r = Registry()
        >>> x, y, z = 3, 4, 5
        >>> w = list()
        >>> r.register(x, 'Label')
        'Label'
        >>> r.register(y, 'Label')  # duplicate label disambiguated
        'Label_01'
        >>> r.register(w, 'Label')  # uniqueness only enforced within groups
        'Label'
        >>> r.register(z, 'Label', group=int)  # put z in integer registry
        'Label_02'
    """

    def __init__(self):
        # _registry maps group -> {object: label}
        self._registry = defaultdict(dict)
        # When True, disambiguation is suspended (see the disable decorator).
        self._disable = False

    def register(self, obj, label, group=None):
        """ Register label with object (possibly disambiguating)

        :param obj: The object to label
        :param label: The desired label
        :param group: (optional) use the registry for group (default=type(obj))
        :rtype: str

        *Returns*

            The disambiguated label
        """
        group = group or type(obj)
        reg = self._registry[group]

        has_obj = obj in reg
        has_label = label in reg.values()
        # True when the label is already assigned to this very object.
        label_is_obj = has_label and has_obj and reg[obj] == label

        if has_label and (not label_is_obj):
            values = set(reg.values())
            if has_obj:
                # Re-labeling: the object's old label must not block itself.
                values.remove(reg[obj])
            if not self._disable:
                label = disambiguate(label, values)

        reg[obj] = label
        return label

    def unregister(self, obj, group=None):
        # Drop the label for obj within its group, if present.
        group = group or type(obj)
        reg = self._registry[group]
        if obj in reg:
            reg.pop(obj)

    def clear(self):
        """ Reset registry, clearing all stored values """
        self._registry = defaultdict(dict)
def disable(func):
    """ Decorator to temporarily disable disambiguation """
    @wraps(func)
    def wrapper(*args, **kwargs):
        registry = Registry()
        previous = registry._disable
        registry._disable = True
        try:
            return func(*args, **kwargs)
        finally:
            # Restore whatever state was in effect before the call.
            registry._disable = previous
    return wrapper
|
facebookexperimental/eden | eden/hg-server/edenscm/mercurial/treedirstate.py | Python | gpl-2.0 | 19,795 | 0.001364 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
"""tree-based dirstate"""
from __future__ import absolute_import
import binascii
import errno
import heapq
import itertools
import random
import string
import struct
import time
from bindings import treestate as rusttreestate
from . import error, node, pycompat, treestate, txnutil, util
from .i18n import _, _x
dirstateheader = b"########################treedirstate####"
treedirstateversion = 1
treefileprefix = "dirstate.tree."
# Sentinel length value for when a nonnormalset or otherparentset is absent.
setabsent = 0xFFFFFFFF
class _reader(object):
def __init__(self, data, offset):
self.data = data
self.offset = offset
def readuint(self):
v = struct.unpack(">L", self.data[self.offset : self.offset + 4])
self.offset += 4
return v[0]
def readbytes(self):
l = self.readuint()
v = self.data[self.offset : self.offset + l]
self.offset += l
return v
def readstr(self):
return pycompat.decodeutf8(self.readbytes())
class _writer(object):
    """Accumulates big-endian length-prefixed fields into a byte stream."""

    def __init__(self):
        self.buffer = pycompat.stringio()

    def writeuint(self, value):
        # One big-endian 32-bit unsigned integer.
        self.buffer.write(struct.pack(">L", value))

    def writebytes(self, value):
        # uint length prefix followed by the raw payload.
        self.writeuint(len(value))
        self.buffer.write(value)

    def writestr(self, value):
        # Text is stored UTF-8 encoded, length-prefixed.
        self.writebytes(pycompat.encodeutf8(value))
# The treedirstatemap iterator uses the getnext method on the dirstatemap
# to find the next item on each call. This involves searching down the
# tree each time. A future improvement is to keep the state between each
# call to avoid these extra searches.
class treedirstatemapiterator(object):
    """Iterator over (filename, state-tuple) pairs in a treedirstatemap."""

    def __init__(self, map_, removed=False):
        self._rmap = map_
        self._removed = removed  # iterate removed files instead of tracked
        self._at = None          # last key yielded; None means "from the start"

    def __iter__(self):
        return self

    def __next__(self):
        item = self._rmap.getnext(self._at, self._removed)
        if item is None:
            raise StopIteration
        self._at = item[0]
        return item

    def next(self):
        # Python 2 iterator protocol compatibility.
        return self.__next__()
class treedirstatemap(object):
def __init__(self, ui, opener, root, importmap=None):
self._ui = ui
self._opener = opener
self._root = root
self.copymap = {}
self._filename = "dirstate"
self._rmap = rusttreestate.treedirstatemap(ui, opener)
self._treeid = None
self._parents = None
self._dirtyparents = False
self._nonnormalset = set()
self._otherparentset = set()
self._packedsize = 0
if importmap is not None:
self._rmap.importmap(importmap)
self._parents = importmap._parents
def shouldtrack(filename):
return self._rmap.hastrackedfile(filename) or self._rmap.hasremovedfile(
filename
)
self._nonnormalset = set(filter(shouldtrack, importmap.nonnormalset))
self._otherparentset = set(filter(shouldtrack, importmap.otherparentset))
self.copymap = {
dst: src for dst, src in importmap.copymap.items() if shouldtrack(dst)
}
else:
self.read()
def preload(self):
pass
def clear(self):
self._rmap.clear()
self.copymap.clear()
if self._nonnormalset is not None:
self._nonnormalset.clear()
if self._otherparentset is not None:
self._otherparentset.clear()
self.setparents(node.nullid, node.nullid)
util.clearcachedproperty(self, "filefoldmap")
util.clearcac | hedproperty(self, "dirfoldmap")
def __len__(self):
"""Returns the number of files, including removed files."""
return self._rmap.filecount()
def itertrackeditems(self):
"""Returns an iterator over (filename, (state, mode, size, mtime))."""
return treedirstatemap | iterator(self._rmap, removed=False)
def iterremoveditems(self):
"""
Returns an iterator over (filename, (state, mode, size, mtime)) for
files that have been marked as removed.
"""
return treedirstatemapiterator(self._rmap, removed=True)
def iteritems(self):
return itertools.chain(self.itertrackeditems(), self.iterremoveditems())
items = iteritems
def gettracked(self, filename):
"""Returns (state, mode, size, mtime) for the tracked file."""
return self._rmap.gettracked(filename)
def getremoved(self, filename, default=None):
"""Returns (state, mode, size, mtime) for the removed file."""
return self._rmap.getremoved(filename, default)
def get(self, filename, default=None):
return self._rmap.gettracked(filename) or self._rmap.getremoved(
filename, default
)
def getcasefoldedtracked(self, filename, foldfunc):
return self._rmap.getcasefoldedtracked(filename, foldfunc, id(foldfunc))
def getfiltered(self, filename, foldfunc):
f = self.getcasefoldedtracked(filename, foldfunc)
return [f] if f else []
def __getitem__(self, filename):
item = self._rmap.gettracked(filename) or self._rmap.getremoved(filename, None)
if item is None:
raise KeyError(filename)
return item
def hastrackedfile(self, filename):
"""Returns true if the file is tracked in the dirstate."""
return self._rmap.hastrackedfile(filename)
def hasremovedfile(self, filename):
"""Returns true if the file is recorded as removed in the dirstate."""
return self._rmap.hasremovedfile(filename)
def __contains__(self, filename):
return self._rmap.hastrackedfile(filename) or self._rmap.hasremovedfile(
filename
)
def trackedfiles(self):
"""Returns a list of all filenames tracked by the dirstate."""
trackedfiles = []
self._rmap.visittrackedfiles(trackedfiles.append)
return iter(trackedfiles)
def removedfiles(self):
"""Returns a list of all removed files in the dirstate."""
removedfiles = []
self._rmap.visitremovedfiles(removedfiles.append)
return removedfiles
def __iter__(self):
"""Returns an iterator of all files in the dirstate."""
trackedfiles = self.trackedfiles()
removedfiles = self.removedfiles()
if removedfiles:
return heapq.merge(iter(trackedfiles), iter(removedfiles))
else:
return iter(trackedfiles)
def keys(self):
return list(iter(self))
def hastrackeddir(self, dirname):
"""
Returns True if the dirstate includes a directory.
"""
return self._rmap.hastrackeddir(dirname + "/")
def hasremoveddir(self, dirname):
"""
Returns True if the directories containing files marked for removal
includes a directory.
"""
return self._rmap.hasremoveddir(dirname + "/")
def hasdir(self, dirname):
"""
Returns True if the directory exists in the dirstate for either
tracked or removed files.
"""
return self.hastrackeddir(dirname) or self.hasremoveddir(dirname)
def addfile(self, f, oldstate, state, mode, size, mtime):
self._rmap.addfile(
f,
pycompat.encodeutf8(oldstate),
pycompat.encodeutf8(state),
mode,
size,
mtime,
)
if self._nonnormalset is not None:
if state != "n" or mtime == -1:
self._nonnormalset.add(f)
else:
self._nonnormalset.discard(f)
if self._otherparentset is not None:
if size == -2:
self._otherparentset.add(f)
else:
self._otherparentset.discard(f)
def removefile(self, f, oldstate, size):
self._rmap.removefile(f, oldstate, size)
if self._nonnormalset is not None:
se |
centaurialpha/ninja-ide | ninja_ide/core/encapsulated_env/plugins_manager.py | Python | gpl-3.0 | 3,254 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtCore import QObject
from ninja_ide.gui.ide import IDE
class PluginsManager(QObject):
def __init__(self):
super(PluginsManager, self).__init__()
def get_activated_plugins(self):
qsettings = IDE.ninja_settings()
return qsettings.value('plugins/registry/activated', [])
def get_failstate_plugins(self):
qsettings = IDE.ninja_settings()
return qsettings.value('plugins/registry/failure', [])
def get_to_activate_plugins(self):
qsettings = IDE.ninja_settings()
return qsettings.value('plugins/registry/toactivate', [])
def set_to_activate_plugins(self, to_activate):
qsettings = IDE.ninja_settings()
qsettings.setValue('plugins/registry/toactivate', to_activate)
def set_activated_plugins(self, activated):
qsettings = IDE.ninja_settings()
qsettings.setValue('plugins/registry/activated', activated)
def set_failstate_plugins(self, failure):
qsettings = IDE.ninja_settings()
qsettings.setValue('plugins/registry/failure', failure)
def activate_plugin(self, plugin):
"""
Receives PluginMetadata instance and activates its given plugin
BEWARE: We do not do any kind of checking about if the plugin is
actually installed.
"""
plugin_name = plugin.name
to_activate = self.get_to_activate_plugins()
to_activate.append(plugin_name)
self.set_to_activate_plugins(to_activate)
self.__activate_plugin(plugin, plugin_name)
| def load_all_plugins(self):
to_activat | e = self.get_to_activate_plugins()
for each_plugin in to_activate:
self.__activate_plugin(__import__(each_plugin), each_plugin)
def __activate_plugin(self, plugin, plugin_name):
"""
Receives the actual plugin module and tries activate or marks
as failure
"""
activated = self.get_activated_plugins()
failure = self.get_failstate_plugins()
try:
plugin.activate()
except Exception:
# This plugin can no longer be activated
if plugin_name in activated:
activated.remove(plugin_name)
if plugin_name not in failure:
failure.append(plugin_name)
else:
activated.append(plugin_name)
if plugin_name in failure:
failure.remove(plugin_name)
finally:
self.set_activated_plugins(activated)
self.set_failstate_plugins(failure)
|
jamesporter/InternationalTrade | countryutils/transformations.py | Python | mit | 7,163 | 0.011448 | # various converters between country codes and names
# including the possibility to look up the continent
# a country belongs to.
#
# source: ISO 3166 and
# http://en.wikipedia.org/wiki/List_of_countries_by_continent_(data_file)
import data
import types
def ccn_to_ccn(code):
"""Normalize the numeric country code
Accepts integer and string types as input
Returns a three digit string of the numeric code
"""
if not isinstance(code,types.StringTypes):
code = str(code)
while len(code) < 3:
code = '0' + code
return code
def ccn_to_cca2(code):
"""Given an ISO 3166 numeric country code return the corresponding
two letter country code.
The code passed in can be of string, unicode or integer type.
Raises KeyError if code does not exist.
"""
return data.ccn_to_cca2[ccn_to_ccn(code)]
def ccn_to_cca3(code):
"""Given an ISO 3166 numeric country code return the corresponding
three letter country code.
The code passed in can be of string, unicode or integer type.
Raises KeyError if code does not exist.
"""
return data.ccn_to_cca3[ccn_to_ccn(code)]
def ccn_to_cn(code):
"""Given an ISO 3166 numeric country code return the corresponding
simple English name of the country.
The code passed in can be of string, unicode or integer type.
Raises KeyError if code does not exist.
"""
return data.ccn_to_cn[ccn_to_ccn(code)]
def ccn_to_con(code):
"""Given an ISO 3166 numeric country code return the corresponding
official English name of the country.
The code passed in can be of string, unicode or integer type.
Raises KeyError if code does not exist.
"""
return data.ccn_to_con[ccn_to_ccn(code)]
def cn_to_ccn(code):
"""Given the simple English name of the country return the
corresponding ISO 3166 numeric country code.
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return data.cn_to_ccn[code]
def cca2_to_ccn(code):
"""Given the ISO 3166 two letter country code of the country
return the corresponding numeric country code.
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
""" |
return data.cca2_to_ccn[code.upper()]
def cca3_to_ccn(code):
"""Given the ISO 3166 t | hree letter country code of the country
return the corresponding numeric country code.
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return data.cca3_to_ccn[code.upper()]
def ccn_to_ctca2(code):
"""Given an ISO 3166 numeric country code return the corresponding
two letter continent code according to
http://en.wikipedia.org/wiki/List_of_countries_by_continent_(data_file).
The code passed in can be of string, unicode or integer type.
Raises KeyError if code does not exist.
"""
return data.ccn_to_ctca2[ccn_to_ccn(code)]
def ctca2_to_ccn(code):
"""Given a two letter continent code return the corresponding
list of numeric country codes according to
http://en.wikipedia.org/wiki/List_of_countries_by_continent_(data_file).
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return data.ctca2_to_ccn[code]
# combined trafos
def ccn_to_ctn(code):
"""Given an ISO 3166 numeric country code return the corresponding
continent name according to
http://en.wikipedia.org/wiki/List_of_countries_by_continent_(data_file).
The code passed in can be of string, unicode or integer type.
Raises KeyError if code does not exist.
"""
ctca2 = data.ccn_to_ctca2[ccn_to_ccn(code)]
return data.ctca2_to_ctn[ctca2]
def cca_to_ccn(code):
"""Given the ISO 3166 two or three letter country code of the
country return the corresponding numeric country code.
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
if len(code) == 2:
return cca2_to_ccn(code)
elif len(code) == 3:
return cca3_to_ccn(code)
else:
raise KeyError, code
def cca_to_cn(code):
"""Given the ISO 3166 two or three letter country code of the
country return the simple English name of the country.
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return ccn_to_cn(cca_to_ccn(code))
def cc_to_ccn(code):
"""Given the ISO 3166 numeric or two or three letter country code
of the country return the numeric code.
The code passed in can be of integer, string, or unicode type.
Raises KeyError if code does not exist.
"""
try:
return cca_to_ccn(code)
except (KeyError, TypeError):
return ccn_to_ccn(code)
def cc_to_cn(code):
"""Given the ISO 3166 numeric or two or three letter country code
of the country return the simple English name of the country.
The code passed in can be of integer, string, or unicode type.
Raises KeyError if code does not exist.
"""
return ccn_to_cn(cc_to_ccn(code))
def cc_to_con(code):
"""Given the ISO 3166 numeric or two or three letter country code
of the country return the official English name of the country.
The code passed in can be of integer, string, or unicode type.
Raises KeyError if code does not exist.
"""
return ccn_to_con(cc_to_ccn(code))
def cca_to_con(code):
"""Given the ISO 3166 two or three letter country code of the
country return the official English name of the country.
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return ccn_to_con(cca_to_ccn(code))
def cca_to_ctn(code):
"""Given the ISO 3166 two or three letter country code of the
country return the corresponding continent name.
The code passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return ccn_to_ctn(cca_to_ccn(code))
def cca_to_ctca2(code):
"""Given the ISO 3166 two or three letter country code of the
country return the corresponding two letter continent code
The code passed in can be of string or unicode type
Raises KeyError if code does not exist
"""
return ccn_to_ctca2(cca_to_ccn(code))
def cn_to_ctca2(code):
"""Given the simple English name of a country return the
corresponding two letter continent code.
The name passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return ccn_to_ctca2(cn_to_ccn(code))
def cn_to_ctn(code):
"""Given the simple English name of a country return the
English name of the corresponding continent.
The name passed in can be of string or unicode type.
Raises KeyError if code does not exist.
"""
return ccn_to_ctn(cn_to_ccn(code))
|
PyBargain/ice-mud-game | server.py | Python | gpl-3.0 | 3,444 | 0.003683 | #!/usr/bin/python3
# encoding=utf-8
import time
from socket import socket
from s | elect import select
from hashlib import sha1
import pygame
from pygame.locals import *
from game import Player, Game
TICKET_SHA1SUM = '7aa8398d0e80429a3380540b86f08987d0fb3e77'
'''
客户端发往服务器端的消息前缀:
N 设置昵称
S 验证身份特权
K 按键事件
服务器端发往客户端的消息前缀:
T startTime
C 当前在线人数
[ 数据
W 赢家信息
'''
class Server:
def | __init__(self, saddr):
self.s_socket = socket()
self.s_socket.bind(('127.0.0.1', 10667))
def start(self):
self.s_socket.listen(2)
self.cs_socket = [self.s_socket.accept()[0]]
self.players = [Player()]
self.startTime = time.time() + 35
stopAcceptTime = self.startTime - 5
print('收到第一个连接,等待30秒其他玩家')
self.cs_socket[-1].send(('T%r\n'%self.startTime).encode())
self.cs_socket[-1].send(('C%r\n'%len(self.cs_socket)).encode())
self.s_socket.settimeout(0.01)
while time.time() < stopAcceptTime:
try:
self.cs_socket.append(self.s_socket.accept()[0])
self.players.append(Player())
self.cs_socket[-1].send(('T%r\n'%self.startTime).encode())
for c_socket in self.cs_socket:
c_socket.send(('C%r\n'%len(self.cs_socket)).encode())
except: pass
self.checkNet()
self.s_socket.settimeout(None)
print('5秒后开始游戏')
self.game = Game(self.players, self.startTime)
state = repr(self.game.getState()).encode() + b'\n'
for c_socket in self.cs_socket:
c_socket.send(state)
c_socket.send(state) # XXX: 因为客户端会丢掉收到的第一条消息
time.sleep(self.startTime - time.time())
while True:
self.game.tick()
for p in self.players:
if p.stage == 2:
winner = p
for c_socket in self.cs_socket:
c_socket.send(('W%s\n'%p.name).encode())
return
self.checkNet()
state = repr(self.game.getState()).encode() + b'\n'
for c_socket in self.cs_socket:
c_socket.send(state)
time.sleep(1/10)
def close(self):
self.s_socket.close()
def checkNet(self):
for c_socket in select(self.cs_socket, [], [], 0)[0]:
s = b''
while True:
s += c_socket.recv(1)
if s[-1] == ord('\n'):
break
s = s.decode()
if s[0] == 'K':
self.players[self.cs_socket.index(c_socket)].keyEvent(s[1])
elif s[0] == 'N':
self.players[self.cs_socket.index(c_socket)].setName(s[1:-1])
elif s[0] == 'S':
print(repr(s[1:-1].encode()))
if sha1(s[1:-1].encode()).hexdigest() == TICKET_SHA1SUM:
self.players[self.cs_socket.index(c_socket)].isPayed = True
self.players[self.cs_socket.index(c_socket)].y -= 200
try:
server = Server(('127.0.0.1', 10667))
server.start()
finally:
server.close()
|
ArthurZey/dvrhelper | tests_dvrhelper.py | Python | mit | 2,446 | 0.016353 | import dvrhelper
import unittest
class TestDVRHelperMethods(unittest.TestCase):
def test_DVRFile_init(self):
examples = {
# format of result:
# [x.path["filename"], x.path["ext"], x.show_name["raw"], x.season["num"]["int"], x.season["num"]["disp"], x. | episode["num"]["int"], x.episode["num"]["disp"]
"12.Monkeys.S01E0 | 4.HDTV.x264-KILLERS.mp4":
["12.Monkeys.S01E04.HDTV.x264-KILLERS.mp4", ".mp4", "12.Monkeys.", 1, "01", 4, "04"],
"/volume2/Downloads/Archer.2009.S05E07.HDTV.x264-2HD.mp4":
["Archer.2009.S05E07.HDTV.x264-2HD.mp4", ".mp4", "Archer.2009.", 5, "05", 7, "07"],
"/volume1/Dropbox/Media/DVR/Bobs.Burgers.S04E20.PROPER.HDTV.x264-W4F.mp4":
["Bobs.Burgers.S04E20.PROPER.HDTV.x264-W4F.mp4", ".mp4", "Bobs.Burgers.", 4, "04", 20, "20"],
"../Community.S06E04.Queer.Studies.and.Advanced.Waxing.REPACK.WebRip.x264-FiHTV.mp4":
["Community.S06E04.Queer.Studies.and.Advanced.Waxing.REPACK.WebRip.x264-FiHTV.mp4", ".mp4", "Community.", 6, "06", 4, "04"],
"Scratch/DVR/Doctor_Who_2005.8x10.In_The_Forest_Of_The_Night.HDTV_x264-FoV.mp4":
["Doctor_Who_2005.8x10.In_The_Forest_Of_The_Night.HDTV_x264-FoV.mp4", ".mp4", "Doctor_Who_2005.", 8, "08", 10, "10"],
"House.of.Cards.2013.S02E01.WEBRip.HDTV.x264-2HD.mp4":
["House.of.Cards.2013.S02E01.WEBRip.HDTV.x264-2HD.mp4", ".mp4", "House.of.Cards.2013.", 2, "02", 1, "01"],
"Once.Upon.a.Time.S04E15.HDTV.x264-LOL.mp4":
["Once.Upon.a.Time.S04E15.HDTV.x264-LOL.mp4", ".mp4", "Once.Upon.a.Time.", 4, "04", 15, "15"],
"The.Flash.2014.S01E23.HDTV.x264-LOL.mp4":
["The.Flash.2014.S01E23.HDTV.x264-LOL.mp4", ".mp4", "The.Flash.2014.", 1, "01", 23, "23"],
}
for (input_path, output_object_vars) in examples.items():
test_file = dvrhelper.DVRFile(input_path)
self.assertEqual(test_file.path["filename"] , output_object_vars[0])
self.assertEqual(test_file.path["ext"] , output_object_vars[1])
self.assertEqual(test_file.show_name["raw"] , output_object_vars[2])
self.assertEqual(test_file.season["num"]["int"] , output_object_vars[3])
self.assertEqual(test_file.season["num"]["disp"] , output_object_vars[4])
self.assertEqual(test_file.episode["num"]["int"] , output_object_vars[5])
self.assertEqual(test_file.episode["num"]["disp"] , output_object_vars[6])
if __name__ == '__main__':
unittest.main() |
epitron/youtube-dl | youtube_dl/extractor/go90.py | Python | unlicense | 5,640 | 0.001064 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
)
class Go90IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?go90\.com/videos/(?P<id>[0-9a-zA-Z]+)'
_TEST = {
'url': 'https://www.g | o90.com/videos/84BUqjLpf9D',
'md5': 'efa7670dbbbf21a7b07b360652b24a32',
'info_dict': {
'id': '84BUqjLpf9D',
'ext': 'mp4',
'tit | le': 'Daily VICE - Inside The Utah Coalition Against Pornography Convention',
'description': 'VICE\'s Karley Sciortino meets with activists who discuss the state\'s strong anti-porn stance. Then, VICE Sports explains NFL contracts.',
'timestamp': 1491868800,
'upload_date': '20170411',
'age_limit': 14,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'https://www.go90.com/api/view/items/' + video_id,
video_id, headers={
'Content-Type': 'application/json; charset=utf-8',
}, data=b'{"client":"web","device_type":"pc"}')
if video_data.get('requires_drm'):
raise ExtractorError('This video is DRM protected.', expected=True)
main_video_asset = video_data['main_video_asset']
episode_number = int_or_none(video_data.get('episode_number'))
series = None
season = None
season_id = None
season_number = None
for metadata in video_data.get('__children', {}).get('Item', {}).values():
if metadata.get('type') == 'show':
series = metadata.get('title')
elif metadata.get('type') == 'season':
season = metadata.get('title')
season_id = metadata.get('id')
season_number = int_or_none(metadata.get('season_number'))
title = episode = video_data.get('title') or series
if series and series != title:
title = '%s - %s' % (series, title)
thumbnails = []
formats = []
subtitles = {}
for asset in video_data.get('assets'):
if asset.get('id') == main_video_asset:
for source in asset.get('sources', []):
source_location = source.get('location')
if not source_location:
continue
source_type = source.get('type')
if source_type == 'hls':
m3u8_formats = self._extract_m3u8_formats(
source_location, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False)
for f in m3u8_formats:
mobj = re.search(r'/hls-(\d+)-(\d+)K', f['url'])
if mobj:
height, tbr = mobj.groups()
height = int_or_none(height)
f.update({
'height': f.get('height') or height,
'width': f.get('width') or int_or_none(height / 9.0 * 16.0 if height else None),
'tbr': f.get('tbr') or int_or_none(tbr),
})
formats.extend(m3u8_formats)
elif source_type == 'dash':
formats.extend(self._extract_mpd_formats(
source_location, video_id, mpd_id='dash', fatal=False))
else:
formats.append({
'format_id': source.get('name'),
'url': source_location,
'width': int_or_none(source.get('width')),
'height': int_or_none(source.get('height')),
'tbr': int_or_none(source.get('bitrate')),
})
for caption in asset.get('caption_metadata', []):
caption_url = caption.get('source_url')
if not caption_url:
continue
subtitles.setdefault(caption.get('language', 'en'), []).append({
'url': caption_url,
'ext': determine_ext(caption_url, 'vtt'),
})
elif asset.get('type') == 'image':
asset_location = asset.get('location')
if not asset_location:
continue
thumbnails.append({
'url': asset_location,
'width': int_or_none(asset.get('width')),
'height': int_or_none(asset.get('height')),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnails': thumbnails,
'description': video_data.get('short_description'),
'like_count': int_or_none(video_data.get('like_count')),
'timestamp': parse_iso8601(video_data.get('released_at')),
'series': series,
'episode': episode,
'season': season,
'season_id': season_id,
'season_number': season_number,
'episode_number': episode_number,
'subtitles': subtitles,
'age_limit': parse_age_limit(video_data.get('rating')),
}
|
FDio/vpp | test/test_cdp.py | Python | apache-2.0 | 4,468 | 0.000224 | #!/usr/bin/env python3
""" CDP tests """
from scapy.packet import Packet
from scapy.all import ShortField, StrField
from scapy.layers.l2 import Dot3, LLC, SNAP
from scapy.contrib.cdp import CDPMsgDeviceID, CDPMsgSoftwareVersion, \
CDPMsgPlatform, CDPMsgPortID, CDPv2_HDR
from framework import VppTestCase
from scapy.all import raw
from re import compile
from time import sleep
from util import ppp
import platform
import sys
import unittest
""" TestCDP is a subclass of VPPTestCase classes.
CDP test.
"""
class CustomTLV(Packet):
""" Custom TLV protocol layer for scapy """
fields_desc = [
ShortField("type", 0),
ShortField("length", 4),
StrField("value", "")
]
class TestCDP(VppTestCase):
""" CDP Test Case """
nen_ptr = compile(r"not enabled")
cdp_ptr = compile(r"^([-\.\w]+)\s+([-\.\w]+)\s+([-\.\w]+)\s+([-\.\w]+)$")
err_ptr = compile(r"^([\d]+)\s+([-\w]+)\s+([ -\.\w)(]+)$")
@property
def device_id(self):
return platform.node()
@property
def version(self):
return platform.release()
@property
def port_id(self):
return self.interface.name
@property
def platform(self):
return platform.system()
@classmethod
def setUpClass(cls):
super(TestCDP, cls).setUpClass()
try:
cls.create_pg_interfaces(range(1))
cls.interface = cls.pg_interfaces[0]
cls.interface.admin_up()
cls.interface.config_ip4()
cls.interface.resolve_arp()
except Exception:
super(TestCDP, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(TestCDP, cls).tearDownClass()
def test_enable_cdp(self):
self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1))
ret = self.vapi.cli("show cdp")
self.logger.info(ret)
not_enabled = self.nen_ptr.search(ret)
self.assertFalse(not_enabled, "CDP isn't enabled")
def test_send_cdp_packet(self):
self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1))
self.send_packet(self.create_packet())
neighbors = list(self.show_cdp())
self.assertTrue(neighbors, "CDP didn't register neighbor")
port, system = neighbors[0]
length = min(len(system), len(self.device_id))
self.assert_equal(port, self.port_id, "CDP received invalid port id")
self.assert_equal(system[:length], self.device_id[:length],
"CDP received invalid device id")
def test_cdp_underflow_tlv(self):
self.send_bad_packet(3, ".")
def test_cdp_overflow_tlv(self):
self.send_bad_packet(8, ".")
def send_bad_packet(self, l, v):
self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1))
self.send_packet(self.create_bad_packet(l, v))
err = self.statistics.get_err_counter(
'/err/cdp-input/cdp packets with bad TLVs')
self.assertTrue(err | >= 1, "CDP didn't drop bad packet")
def send_packet(self, packet):
self.logger.debug(ppp("Sending packet:", packet))
self.interface. | add_stream(packet)
self.pg_start()
def create_base_packet(self):
packet = (Dot3(src=self.interface.remote_mac,
dst="01:00:0c:cc:cc:cc") /
LLC(dsap=0xaa, ssap=0xaa, ctrl=0x03) /
SNAP()/CDPv2_HDR())
return packet
def create_packet(self):
packet = (self.create_base_packet() /
CDPMsgDeviceID(val=self.device_id) /
CDPMsgSoftwareVersion(val=self.version) /
CDPMsgPortID(iface=self.port_id) /
CDPMsgPlatform(val=self.platform))
return packet
def create_bad_packet(self, tl=4, tv=""):
packet = (self.create_base_packet() /
CustomTLV(type=1,
length=tl,
value=tv))
return packet
def process_cli(self, exp, ptr):
for line in self.vapi.cli(exp).split('\n')[1:]:
m = ptr.match(line.strip())
if m:
yield m.groups()
def show_cdp(self):
for pack in self.process_cli("show cdp", self.cdp_ptr):
try:
port, system, _, _ = pack
except ValueError:
pass
else:
yield port, system
|
rohitranjan1991/home-assistant | homeassistant/components/tado/water_heater.py | Python | mit | 9,720 | 0.000823 | """Support for Tado hot water zones."""
import logging
import voluptuous as vol
from homeassistant.components.water_heater import (
SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE,
WaterHeaterEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
CONST_HVAC_HEAT,
CONST_MODE_AUTO,
CONST_MODE_HEAT,
CONST_MODE_OFF,
CONST_MODE_SMART_SCHEDULE,
CONST_OVERLAY_MANUAL,
CONST_OVERLAY_TADO_MODE,
CONST_OVERLAY_TIMER,
DATA,
DOMAIN,
SIGNAL_TADO_UPDATE_RECEIVED,
TYPE_HOT_WATER,
)
from .entity import TadoZoneEntity
_LOGGER = logging.getLogger(__name__)
MODE_AUTO = "auto"
MODE_HEAT = "heat"
MODE_OFF = "off"
OPERATION_MODES = [MODE_AUTO, MODE_HEAT, MODE_OFF]
WATER_HEATER_MAP_TADO = {
CONST_OVERLAY_MANUAL: MODE_HEAT,
CONST_OVERLAY_TIMER: MODE_HEAT,
CONST_OVERLAY_TADO_MODE: MODE_HEAT,
CONST_HVAC_HEAT: MODE_HEAT,
CONST_MODE_SMART_SCHEDULE: MODE_AUTO,
CONST_MODE_OFF: MODE_OFF,
}
SUPPORT_FLAGS_HEATER = SUPPORT_OPERATION_MODE
SERVICE_WATER_HEATER_TIMER = "set_water_heater_timer"
ATTR_TIME_PERIOD = "time_period"
WATER_HEATER_TIMER_SCHEMA = {
vol.Required(ATTR_TIME_PERIOD, default="01:00:00"): vol.All(
cv.time_period, cv.positive_timedelta, lambda td: td.total_seconds()
),
vol.Optional(ATTR_TEMPERATURE): vol.Coerce(float),
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Tado water heater platform."""
tado = hass.data[DOMAIN][entry.entry_id][DATA]
entities = await hass.async_add_executor_job(_generate_entities, tado)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_WATER_HEATER_TIMER,
WATER_HEATER_TIMER_SCHEMA,
"set_timer",
)
if entities:
async_add_entities(entities, True)
def _generate_entities(tado):
"""Create all water heater entities."""
entities = []
for zone in tado.zones:
if zone["type"] == TYPE_HOT_WATER:
entity = create_water_heater_entity(tado, zone["name"], zone["id"], zone)
entities.append(entity)
| return entities
def create_water_heater_entity(tado, name: str, zone_id: int, zone: str):
"""Create a Tado water heater device."""
capabilities = tado.get_capabilities(zone_id)
supports_temperature | _control = capabilities["canSetTemperature"]
if supports_temperature_control and "temperatures" in capabilities:
temperatures = capabilities["temperatures"]
min_temp = float(temperatures["celsius"]["min"])
max_temp = float(temperatures["celsius"]["max"])
else:
min_temp = None
max_temp = None
entity = TadoWaterHeater(
tado,
name,
zone_id,
supports_temperature_control,
min_temp,
max_temp,
)
return entity
class TadoWaterHeater(TadoZoneEntity, WaterHeaterEntity):
"""Representation of a Tado water heater."""
def __init__(
self,
tado,
zone_name,
zone_id,
supports_temperature_control,
min_temp,
max_temp,
):
"""Initialize of Tado water heater entity."""
self._tado = tado
super().__init__(zone_name, tado.home_id, zone_id)
self.zone_id = zone_id
self._unique_id = f"{zone_id} {tado.home_id}"
self._device_is_active = False
self._supports_temperature_control = supports_temperature_control
self._min_temperature = min_temp
self._max_temperature = max_temp
self._target_temp = None
self._supported_features = SUPPORT_FLAGS_HEATER
if self._supports_temperature_control:
self._supported_features |= SUPPORT_TARGET_TEMPERATURE
self._current_tado_hvac_mode = CONST_MODE_SMART_SCHEDULE
self._overlay_mode = CONST_MODE_SMART_SCHEDULE
self._tado_zone_data = None
async def async_added_to_hass(self):
"""Register for sensor updates."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(
self._tado.home_id, "zone", self.zone_id
),
self._async_update_callback,
)
)
self._async_update_data()
@property
def supported_features(self):
"""Return the list of supported features."""
return self._supported_features
@property
def name(self):
"""Return the name of the entity."""
return self.zone_name
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def current_operation(self):
"""Return current readable operation mode."""
return WATER_HEATER_MAP_TADO.get(self._current_tado_hvac_mode)
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._tado_zone_data.target_temp
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._tado_zone_data.is_away
@property
def operation_list(self):
"""Return the list of available operation modes (readable)."""
return OPERATION_MODES
@property
def temperature_unit(self):
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._min_temperature
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._max_temperature
def set_operation_mode(self, operation_mode):
"""Set new operation mode."""
mode = None
if operation_mode == MODE_OFF:
mode = CONST_MODE_OFF
elif operation_mode == MODE_AUTO:
mode = CONST_MODE_SMART_SCHEDULE
elif operation_mode == MODE_HEAT:
mode = CONST_MODE_HEAT
self._control_heater(hvac_mode=mode)
def set_timer(self, time_period, temperature=None):
"""Set the timer on the entity, and temperature if supported."""
if not self._supports_temperature_control and temperature is not None:
temperature = None
self._control_heater(
hvac_mode=CONST_MODE_HEAT, target_temp=temperature, duration=time_period
)
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if not self._supports_temperature_control or temperature is None:
return
if self._current_tado_hvac_mode not in (
CONST_MODE_OFF,
CONST_MODE_AUTO,
CONST_MODE_SMART_SCHEDULE,
):
self._control_heater(target_temp=temperature)
return
self._control_heater(target_temp=temperature, hvac_mode=CONST_MODE_HEAT)
@callback
def _async_update_callback(self):
"""Load tado data and update state."""
self._async_update_data()
self.async_write_ha_state()
@callback
def _async_update_data(self):
"""Load tado data."""
_LOGGER.debug("Updating water_heater platform for zone %d", self.zone_id)
self._tado_zone_data = self._tado.data["zone"][self.zone_id]
self._current_tado_hvac_mode = self._tado_zone_data.current_hvac_mode
def _control_heater(self, hvac_mode=None, target_temp=None, duration=None):
"""Send new target temperature."""
if hvac_mode:
self._current_tado_hvac_mode = hvac_mode
if target_temp:
self._target_temp = target_temp
# Set a |
purism/pdak | dak/new_security_install.py | Python | gpl-2.0 | 6,680 | 0.004341 | #!/usr/bin/env python
"""
Do whatever is needed to get a security upload released
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2010 Joerg Jaspert <joerg@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
################################################################################
import os
import sys
import time
import apt_pkg
import commands
import errno
import fcntl
from daklib import queue
from daklib import daklog
from daklib import utils
from daklib.dbconn import *
from daklib.regexes import re_taint_free
from daklib.config import Config
# Module-level state shared between main() and the helpers below.
Options = None   # parsed Security::Options config subtree
Logger = None    # daklog logger, only created for the real (non-sudo) run
Queue = None     # NOTE(review): declared global in main() but never assigned here
changes = []     # de-duplicated list of .changes file arguments
# Print the command-line help text and exit.  (Python 2 print statements.)
def usage():
    print """Usage: dak security-install [OPTIONS] changesfiles
Do whatever there is to do for a security release
  -h, --help show this help and exit
  -n, --no-action don't commit changes
  -s, --sudo dont bother, used internally
"""
    sys.exit()
# Run an external command, refusing anything with taint characters.
# In --no-action mode the command is only printed.
def spawn(command):
    if not re_taint_free.match(command):
        utils.fubar("Invalid character in \"%s\"." % (command))
    if Options["No-Action"]:
        print "[%s]" % (command)
    else:
        (result, output) = commands.getstatusoutput(command)
        if (result != 0):
            # Abort with the command's output and exit code on failure.
            utils.fubar("Invocation of '%s' failed:\n%s\n" % (command, output), result)
##################### ! ! ! N O T E ! ! ! #####################
#
# These functions will be reinvoked by semi-priveleged users, be careful not
# to invoke external programs that will escalate privileges, etc.
#
##################### ! ! ! N O T E ! ! ! #####################
# Either re-exec this tool via sudo as the 'dak' user (passing -<arg>),
# or call fn() directly when already privileged.  Exits afterwards if
# 'exit' is true.  NOTE(review): 'exit' shadows the builtin.
def sudo(arg, fn, exit):
    if Options["Sudo"]:
        os.spawnl(os.P_WAIT, "/usr/bin/sudo", "/usr/bin/sudo", "-u", "dak", "-H",
          "/usr/local/bin/dak", "new-security-install", "-"+arg)
    else:
        fn()
    if exit:
        quit()
# Thin wrapper: run the approval step (re-execing as dak via -A), then exit.
def do_Approve(): sudo("A", _do_Approve, True)
def _do_Approve():
print "Locking unchecked"
with os.fdopen(os.open('/srv/security-master.debian.org/lock/unchecked.lock', os.O_CREAT | os.O_RDWR ), 'r') as lock_fd:
while True:
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
print "Another process keeping the unchecked lock, waiting."
time.sleep(10)
else:
raise
# 1. Install accepted packages
print "Installing accepted packages into security archive"
for queue in ("embargoed",):
spawn("dak process-policy {0}".format(queue))
# 2. Run all the steps that are needed to publish the changed archive
print "Doing loadsa stuff in the archive, will take time, please be p | atient"
os.environ['configdir'] = '/srv/security-master.debian.org/dak/config/debian-security'
spawn("/srv/security-master.debian.org/dak/config/debian-security/cronscript unchecked-dinstall")
print "Triggering metadata export for packages.d.o and other consumers"
| spawn("/srv/security-master.debian.org/dak/config/debian-security/export.sh")
########################################################################
########################################################################
def main():
    """Entry point: parse options, validate .changes arguments, approve them.

    Unless already running as the 'dak' user (or in --no-action mode), the
    approval step re-executes this tool via sudo (see sudo()/do_Approve()).
    """
    global Options, Logger, Queue, changes
    cnf = Config()
    Arguments = [('h', "Help", "Security::Options::Help"),
                 ('n', "No-Action", "Security::Options::No-Action"),
                 ('c', 'Changesfile', "Security::Options::Changesfile"),
                 ('s', "Sudo", "Security::Options::Sudo"),
                 ('A', "Approve", "Security::Options::Approve")
                 ]
    # Make sure every option key exists in the config, even if empty.
    for i in ["Help", "No-Action", "Changesfile", "Sudo", "Approve"]:
        key = "Security::Options::%s" % i
        if key not in cnf:
            cnf[key] = ""
    changes_files = apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
    Options = cnf.subtree("Security::Options")
    if Options['Help']:
        usage()
    # De-duplicate the .changes arguments while validating their suffix.
    changesfiles={}
    for a in changes_files:
        if not a.endswith(".changes"):
            utils.fubar("not a .changes file: %s" % (a))
        changesfiles[a]=1
    changes = changesfiles.keys()
    username = utils.getusername()
    if username != "dak":
        print "Non-dak user: %s" % username
        Options["Sudo"] = "y"
    if Options["No-Action"]:
        Options["Sudo"] = ""
    if not Options["Sudo"] and not Options["No-Action"]:
        Logger = daklog.Logger("security-install")
    session = DBConn().session()
    # If we call ourselve to approve, we do just that and exit
    if Options["Approve"]:
        do_Approve()
        sys.exit()
    if len(changes) == 0:
        utils.fubar("Need changes files as arguments")
    # Yes, we could do this inside do_Approve too. But this way we see who exactly
    # called it (ownership of the file)
    acceptfiles={}
    for change in changes:
        dbchange=get_dbchange(os.path.basename(change), session)
        # strip epoch from version
        version=dbchange.version
        version=version[(version.find(':')+1):]
        # strip possible version from source (binNMUs)
        source = dbchange.source.split(None, 1)[0]
        acceptfilename="%s/COMMENTS/ACCEPT.%s_%s" % (os.path.dirname(os.path.abspath(changes[0])), source, version)
        acceptfiles[acceptfilename]=1
    print "Would create %s now and then go on to accept this package, if you allow me to." % (acceptfiles.keys())
    if Options["No-Action"]:
        sys.exit(0)
    else:
        raw_input("Press Enter to continue")
    # Write the ACCEPT marker files, then hand over to the approval step.
    for acceptfilename in acceptfiles.keys():
        accept_file = file(acceptfilename, "w")
        accept_file.write("OK\n")
        accept_file.close()
    do_Approve()
if __name__ == '__main__':
    main()
|
joachimmetz/dfvfs | tests/file_io/hfs_file_io.py | Python | apache-2.0 | 3,508 | 0.003421 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Extended File System (HFS) file-like object."""
import unittest
from dfvfs.file_io import hfs_file_io
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests.file_io import test_lib
class HFSFileTest(test_lib.HFSImageFileTestCase):
  """Tests the file-like object implementation using pyfshfs.file_entry."""

  # Catalog node identifiers of the fixture files inside hfsplus.raw.
  _IDENTIFIER_ANOTHER_FILE = 21
  _IDENTIFIER_PASSWORDS_TXT = 20

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    super(HFSFileTest, self).setUp()
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['hfsplus.raw'])
    self._SkipIfPathNotExists(test_path)
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._resolver_context.Empty()

  def testOpenCloseIdentifier(self):
    """Test the open and close functionality using an identifier."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS,
        identifier=self._IDENTIFIER_PASSWORDS_TXT, parent=self._raw_path_spec)
    file_object = hfs_file_io.HFSFile(self._resolver_context, path_spec)
    self._TestOpenCloseIdentifier(file_object)

  def testOpenCloseLocation(self):
    """Test the open and close functionality using a location."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS,
        identifier=self._IDENTIFIER_PASSWORDS_TXT, location='/passwords.txt',
        parent=self._raw_path_spec)
    file_object = hfs_file_io.HFSFile(self._resolver_context, path_spec)
    self._TestOpenCloseLocation(file_object)

    # Try open with a path specification that has no parent.
    path_spec.parent = None
    file_object = hfs_file_io.HFSFile(self._resolver_context, path_spec)
    with self.assertRaises(errors.PathSpecError):
      self._TestOpenCloseLocation(file_object)

  def testSeek(self):
    """Test the seek functionality."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS, location='/a_directory/another_file',
        identifier=self._IDENTIFIER_ANOTHER_FILE,
        parent=self._raw_path_spec)
    file_object = hfs_file_io.HFSFile(self._resolver_context, path_spec)
    self._TestSeek(file_object)

  def testRead(self):
    """Test the read functionality."""
    # Reconstructed: this method was garbled by extraction artifacts.
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS, location='/passwords.txt',
        identifier=self._IDENTIFIER_PASSWORDS_TXT,
        parent=self._raw_path_spec)
    file_object = hfs_file_io.HFSFile(self._resolver_context, path_spec)
    self._TestRead(file_object)

  def testReadResourceFork(self):
    """Test the read functionality on a resource fork."""
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS, data_stream='rsrc', identifier=25,
        location='/a_directory/a_resourcefork', parent=self._raw_path_spec)
    file_object = hfs_file_io.HFSFile(self._resolver_context, path_spec)
    self._TestReadResourceFork(file_object)


if __name__ == '__main__':
  unittest.main()
|
tsadm/desktop | lib/tsdesktop/siteman/__init__.py | Python | bsd-3-clause | 2,873 | 0.000696 | import re
import sys
from os import path
from tsdesktop import config
from tsdesktop.dockman import services
from configparser import NoSectionError, NoOptionError
class Site:
    """A configured site: a name, a document root and its webserver service."""
    # Class-level defaults; instance values are set in __init__/_initws.
    name = None
    docroot = None
    webserver = None
    def __init__(self, name, docroot):
        self.name = name
        self.docroot = docroot
        self._initws()
    def __str__(self):
        return "<Site: {}>".format(self.name)
    def load(self):
        """Validate the docroot path; return an error string or None on success."""
        dpath = path.abspath(self.docroot)
        if not path.exists(dpath):
            return 'path not found'
        elif not path.isdir(dpath):
            return 'not a dir'
        return None
    def _initws(self):
        """initializes webserver manager"""
        if self.webserver is None:
            # Look up the webserver class configured for this site and mount
            # the docroot into the container's document root.
            s = config.cfg.get('site:'+self.name, 'webserver')
            k = services.classMap.get(s)
            self.webserver = k(site=self.name)
            self.webserver.volAdd(self.docroot, '/var/www/html')
    def status(self):
        """Return the webserver service status string (e.g. 'running')."""
        return self.webserver.status()
    def start(self):
        """Start the site's webserver service."""
        return self.webserver.start()
    def stop(self):
        """Stop the site's webserver service."""
        return self.webserver.stop()
# -- valid site names: letters, digits, dot, dash and underscore only
site_name_re = re.compile(r'^[a-zA-Z0-9\.\-_]+$')
# -- check if docroot is already in use by a site
def _dupDocroot(dpath):
    """Return an error string if *dpath* is already registered, else None."""
    for s in sitesAll():
        if s.docroot == dpath:
            return "{} registered by {}".format(dpath, s.name)
    return None
# -- add site to config
def siteAdd(name, docroot):
    """Register a new site section in the config.

    Returns an error string on failure (duplicate name or docroot),
    or None on success.
    """
    if config.cfg.has_section('site:'+name):
        return 'site already exists'
    err = _dupDocroot(docroot)
    if err is not None:
        return err
    config.cfg.add_section('site:'+name)
    config.cfg.set('site:'+name, 'docroot', docroot)
    # NOTE(review): unlike siteRemove(), this does not call config.write();
    # presumably the caller persists the config -- confirm before changing.
    return None
# -- remove site from config
def siteRemove(name):
    """Delete the site's config section and persist the change; returns None."""
    config.cfg.remove_section('site:'+name)
    config.write()
    return None
# -- get site from config
def siteGet(name):
    """Return a Site built from config, or None if the section/option is missing."""
    try:
        docroot = config.cfg.get('site:'+name, 'docroot')
    # Both error cases returned None separately before; one handler suffices.
    except (NoSectionError, NoOptionError):
        return None
    return Site(name, docroot)
# -- get all sites from config
def sitesAll():
    """Return a list of valid, loadable Site objects from the config.

    Sections with invalid names or unloadable docroots are skipped.
    """
    rl = []
    for sect in config.cfg.sections():
        if not sect.startswith('site:'):
            continue
        name = ':'.join(sect.split(':')[1:])
        if not site_name_re.match(name):
            # Bug fix: the old code returned None here, so a single malformed
            # section name aborted the whole listing and made callers such as
            # sitesRunning() crash iterating None.  Skip the bad entry instead.
            # FIXME: log a message about the invalid site name
            continue
        site = siteGet(name)
        err = site.load()
        if err is None:
            rl.append(site)
        # FIXME: else log a message at least
    return rl
def sitesRunning():
    """Return the subset of configured sites whose webserver is running."""
    return [site for site in sitesAll() if site.status() == 'running']
|
kovidgoyal/html5-parser | genencodings.py | Python | apache-2.0 | 1,197 | 0.000835 | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: Apache 2.0 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
# Absolute path of this script; main() chdirs to its directory before writing.
self_path = os.path.abspath(__file__)
# Preamble emitted at the top of the generated encoding_names.py module.
HEADER = '''\
# Do not edit
# Generated by genencodings.py
encodings = {
'''
def get_data(url='https://encoding.spec.whatwg.org/encodings.json'):
    """Download and parse the WHATWG encodings registry as JSON."""
    return json.loads(urlopen(url).read().decode('ascii'))
def get_mapping(data):
    """Yield lower-cased (label, canonical_name) pairs from the registry data.

    *data* is the parsed encodings.json: a list of categories, each with an
    'encodings' list whose entries carry a canonical 'name' and its alias
    'labels'.  (Loop header reconstructed; it was garbled by extraction.)
    """
    for category in data:
        for encoding in category['encodings']:
            name = encoding['name'].lower()
            for label in encoding['labels']:
                yield label.lower(), name
def main():
    """Regenerate the encoding_names.py module from the WHATWG registry."""
    # Work relative to the repository root (directory of this script).
    os.chdir(os.path.dirname(self_path))
    data = get_data()
    ans = dict(get_mapping(data))
    keys = sorted(ans)
    lines = ['    "%s": "%s",' % (k, ans[k]) for k in keys] + ['}']
    with open('src/html5_parser/encoding_names.py', 'wb') as f:
        f.write(HEADER.encode('ascii'))
        f.write('\n'.join(lines).encode('ascii'))
if __name__ == '__main__':
    main()
|
ncliam/serverpos | openerp/custom_modules/pos_shop/shop.py | Python | agpl-3.0 | 3,525 | 0.008227 | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class pos_shop(osv.osv):
    """A point-of-sale shop: location, pricelist, sale journal and contact data.

    Two lines below ('Sale Journal' label and the context=None default)
    were reconstructed from extraction-garbled text.
    """
    _name = 'pos.shop'
    _columns = {
        'name': fields.char('Shop Name', required=True),
        'company_id':fields.many2one('res.company', 'Company', required=True),
        'currency_id':fields.many2one('res.currency', 'Currency'),
        'config_ids': fields.one2many('pos.config', 'shop_id', 'POS Devices', readonly=True),
        'stock_location_id': fields.many2one('stock.location', 'Stock Location', domain=[('usage', '=', 'internal')]),
        'pricelist_id': fields.many2one('product.pricelist','Pricelist'),
        'journal_id' : fields.many2one('account.journal', 'Sale Journal', domain=[('type', '=', 'sale')]),
        'address': fields.char('Address'),
        'hotline': fields.char('Hotline'),
        'email': fields.char('Email'),
    }
    def _default_sale_journal(self, cr, uid, context=None):
        # First sale journal of the current user's company, or False.
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        res = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'sale'), ('company_id', '=', company_id)], limit=1, context=context)
        return res and res[0] or False
    def _default_pricelist(self, cr, uid, context=None):
        # First sale pricelist, or False when none is defined.
        res = self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')], limit=1, context=context)
        return res and res[0] or False
    def _get_default_location(self, cr, uid, context=None):
        # Stock location of the first warehouse of the user's company.
        wh_obj = self.pool.get('stock.warehouse')
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        res = wh_obj.search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
        if res and res[0]:
            return wh_obj.browse(cr, uid, res[0], context=context).lot_stock_id.id
        return False
    def _get_default_company(self, cr, uid, context=None):
        company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
        return company_id
    _defaults = {
        'journal_id': _default_sale_journal,
        'stock_location_id': _get_default_location,
        'company_id': _get_default_company,
        'pricelist_id': _default_pricelist,
    }
class pos_config(osv.osv):
    """Extend the core POS config with a shop link and a device restriction."""
    _inherit = 'pos.config'
    _columns = {
        'shop_id':fields.many2one('pos.shop', 'Shop', required=True),
        # Fixed user-visible typo: "Indentifier" -> "Identifier".  NOTE: existing
        # translations keyed on the old source string will need re-translation.
        'udi': fields.char('Unique Device Identifier', help="Restrict this Point of Sale to a physical device"),
    }
|
toenuff/treadmill | tests/metrics_test.py | Python | apache-2.0 | 3,085 | 0 | """Test for treadmill.metrics."""
import unittest
from collections import namedtuple
# Disable W0611: Unused import
import tests.treadmill_test_deps # pylint: disable=W0611
import mock
from treadmill import metrics
from treadmill import sysinfo
STATINFO = """cache 0
rss 0
mapped_file 0
pgpgin 0
pgpgout 0
swap 0
inactive_anon 0
active_anon 0
inactive_file 0
active_file 0
unevictable 0
hierarchical_memory_limit 0
hierarchical_memsw_limit 0
total_cache 0
total_rss 0
total_mapped_file 0
total_pgpgin 0
total_pgpgout 0
total_swap 0
total_inactive_anon 0
total_active_anon 0
total_inactive_file 0
total_active_file 0
total_unevictable 0"""
class MetricsTest(unittest.TestCase):
"""Tests for teadmill.metrics."""
    # All cgroup accessors are mocked so no real cgroup filesystem is needed.
    @mock.patch('treadmill.cgutils.cgrp_meminfo',
                mock.Mock(return_value=(10, 12, 13)))
    @mock.patch('treadmill.cgutils.pids_in_cgroup',
                mock.Mock(return_value=[]))
    @mock.patch('treadmill.cgroups.get_value',
                mock.Mock(return_value=STATINFO))
    @mock.patch('time.time', mock.Mock(return_value=1234))
    def test_read_memory_stats(self):
        """Tests updating memory stats from cgroups."""
        # Expect the mocked cgrp_meminfo triple to be passed straight through.
        self.assertEquals(metrics.read_memory_stats('treadmill/apps/appname'),
                          (10, 12, 13))
@mock.patch('treadmill.cgutils.cpu_usage',
mock.Mock(return_value=100))
@mock.patch('treadmill.cgutils.stat',
mock.Mock(return_value=namedtuple('stat', ['st_mtime'])(0)))
@mock.patch('treadmill.cgutils.reset_cpu_usage',
mock.Mock())
@mock.patch('treadmill.cgroups.get_cpu_shares',
mock.Mock(return_value=10))
@mock.patch('treadmill.sysinfo.total_bogomips',
mock.Mock(return_value=100))
@mock.patch('treadmill.sysinfo.cpu_count',
mock.Mock(return_value=1))
@mock.patch('treadmill.cgutils.get_cpu_ratio',
mock.Mock(return_value=.5))
@mock.patch('time.time', mock.Mock(return_value=10))
def test_update_cpu_metrics(self):
"""Tests updating cpu stats from cgroups."""
cpumetrics = metrics.read_cpu_stats('treadmill/apps/appname')
|
cpu_usage = 100
cpu_ratio = .5
time_delta = 10
cpu_count = 1
cpu_shares = 10
total_bogomips = 100
requested_ratio = cpu_ratio * 100
usage_ratio = ((cpu_usage * total_bogomips) /
(time_delta * cpu_shares) / cpu_count)
usage = ((cpu_usage * total_bogomips) /
| (time_delta * sysinfo.BMIPS_PER_CPU) /
cpu_count * 100)
self.assertEquals(
(usage, requested_ratio, usage_ratio),
cpumetrics
)
    # /proc/loadavg is mocked; only the 1- and 5-minute averages are returned.
    @mock.patch('__builtin__.open',
                mock.mock_open(read_data='1.0 2.0 2.5 12/123 12345\n'))
    @mock.patch('time.time', mock.Mock(return_value=10))
    def test_read_load(self):
        """Tests reading loadavg."""
        self.assertEquals(('1.0', '2.0'), metrics.read_load())
if __name__ == '__main__':
    unittest.main()
|
liyocee/job_hunt | backend/employee/migrations/0001_initial.py | Python | mit | 1,960 | 0.002041 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Initial employee app migration: AppliedJobs and Employee models.

    Two garbled lines (DateTimeField and the second CreateModel call)
    were reconstructed from extraction artifacts; no schema change intended.
    """

    dependencies = [
        ('common', '0002_job'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AppliedJobs',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(default=django.utils.timezone.now)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(default=django.utils.timezone.now)),
                ('phone_number', models.CharField(max_length=128)),
                ('industry', models.ManyToManyField(to='common.Industry')),
                ('location', models.ForeignKey(to='common.Location')),
                ('skills', models.ManyToManyField(to='common.Skill')),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='appliedjobs',
            name='employee',
            field=models.ForeignKey(to='employee.Employee'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='appliedjobs',
            name='job',
            field=models.ForeignKey(to='common.Job'),
            preserve_default=True,
        ),
    ]
|
JudoWill/glue | glue/qt/widgets/edit_subset_mode_toolbar.py | Python | bsd-3-clause | 2,391 | 0 | from __future__ import absolute_import, division, print_function
from ...external.qt import QtGui
# Reconstructed: this import line was garbled by extraction artifacts.
from ...core.edit_subset_mode import (EditSubsetMode, OrMode, AndNotMode,
                                      AndMode, XorMode, ReplaceMode)
from ..actions import act
from ..qtutil import nonpartial
def set_mode(mode):
    """Set the application-wide subset combination mode to *mode*.

    EditSubsetMode appears to act as shared state (instances created here
    and in the toolbar observe the same .mode) -- confirm in core.
    """
    edit_mode = EditSubsetMode()
    edit_mode.mode = mode
class EditSubsetModeToolBar(QtGui.QToolBar):
    """Toolbar of mutually-exclusive actions selecting the subset edit mode."""
    def __init__(self, title="Subset Update Mode", parent=None):
        super(EditSubsetModeToolBar, self).__init__(title, parent)
        self._group = QtGui.QActionGroup(self)
        # _modes doubles as two lookups (filled in _make_mode):
        # mode class -> QAction, and lower-cased label -> mode class.
        self._modes = {}
        self._add_actions()
        # Check the action matching the current global mode.
        self._modes[EditSubsetMode().mode].trigger()
        self._backup_mode = None
    def _make_mode(self, name, tip, icon, mode):
        # Build a checkable action that switches the global mode when fired.
        a = act(name, self, tip, icon)
        a.setCheckable(True)
        a.triggered.connect(nonpartial(set_mode, mode))
        self._group.addAction(a)
        self.addAction(a)
        self._modes[mode] = a
        # Also index by the first word of the label, e.g. "replace", "or".
        label = name.split()[0].lower().replace('&', '')
        self._modes[label] = mode
    def _add_actions(self):
        self._make_mode("&Replace Mode", "Replace selection",
                        'glue_replace', ReplaceMode)
        self._make_mode("&Or Mode", "Add to selection",
                        'glue_or', OrMode)
        self._make_mode("&And Mode", "Set selection as intersection",
                        'glue_and', AndMode)
        self._make_mode("&Xor Mode", "Set selection as exclusive intersection",
                        'glue_xor', XorMode)
        self._make_mode("&Not Mode", "Remove from selection",
                        'glue_andnot', AndNotMode)
    def set_mode(self, mode):
        """Temporarily set the edit mode to mode
        :param mode: Name of the mode (Or, Not, And, Xor, Replace)
        :type mode: str
        """
        try:
            mode = self._modes[mode]  # label to mode class
        except KeyError:
            raise KeyError("Unrecognized mode: %s" % mode)
        # Remember the prior mode once, so unset_mode can restore it.
        self._backup_mode = self._backup_mode or EditSubsetMode().mode
        self._modes[mode].trigger()  # mode class to action
    def unset_mode(self):
        """Restore the mode to the state before set_mode was called"""
        mode = self._backup_mode
        self._backup_mode = None
        if mode:
            self._modes[mode].trigger()
|
dmitry-sinina/samba4-manager | plugins/tree.py | Python | gpl-2.0 | 3,478 | 0.000288 | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2015 Stéphane Graber
# Author: Stéphane Graber <stgraber@ubuntu.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied war | ranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You can | find the license on Debian systems in the file
# /usr/share/common-licenses/GPL-2
from libs.common import iri_for as url_for
from flask import g, render_template
from libs.ldap_func import ldap_auth, ldap_get_entries, ldap_in_group
TREE_BLACKLIST = ["CN=ForeignSecurityPrincipals",
"OU=sudoers"]
def init(app):
@app.route('/tree')
@app.route('/tree/<base>')
@ldap_auth("Domain Users")
def tree_base(base=None):
if not base:
base = g.ldap['dn']
elif not base.lower().endswith(g.ldap['dn'].lower()):
base += ",%s" % g.ldap['dn']
admin = ldap_in_group("Domain Admins")
entry_fields = [('name', "Name"),
('__description', "Description"),
('__type', "Type")]
entries = []
for entry in sorted(ldap_get_entries("objectClass=top", base,
"onelevel"), key=lambda entry: entry['name']):
if not 'description' in entry:
if 'displayName' in entry:
entry['__description'] = entry['displayName']
else:
entry['__description'] = entry['description']
entry['__target'] = url_for('tree_base',
base=entry['distinguishedName'])
if 'user' in entry['objectClass']:
entry['__type'] = "User"
entry['__target'] = url_for('user_overview',
username=entry['sAMAccountName'])
elif 'group' in entry['objectClass']:
entry['__type'] = "Group"
entry['__target'] = url_for('group_overview',
groupname=entry['sAMAccountName'])
elif 'organizationalUnit' in entry['objectClass']:
entry['__type'] = "Organizational Unit"
elif 'container' in entry['objectClass']:
entry['__type'] = "Container"
elif 'builtinDomain' in entry['objectClass']:
entry['__type'] = "Built-in"
else:
entry['__type'] = "Unknown"
if 'showInAdvancedViewOnly' in entry \
and entry['showInAdvancedViewOnly']:
continue
for blacklist in TREE_BLACKLIST:
if entry['distinguishedName'].startswith(blacklist):
break
else:
entries.append(entry)
parent = None
base_split = base.split(',')
if not base_split[0].lower().startswith("dc"):
parent = ",".join(base_split[1:])
return render_template("pages/tree_base.html", parent=parent,
admin=admin, base=base, entries=entries,
entry_fields=entry_fields)
|
timj/scons | test/option-b.py | Python | mit | 1,453 | 0.003441 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following c | onditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF AN | Y
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
# Verify SCons accepts the POSIX make -b flag, ignoring it with a warning.
test = TestSCons.TestSCons()
test.write('SConstruct', "")
test.run(arguments = '-b .',
         stderr = "Warning: ignoring -b option\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Grunny/zap-cli | zapcli/log.py | Python | mit | 1,840 | 0.002174 | """
Logger classes for the ZAP CLI.
.. moduleauthor:: Daniel Grunwell (grunny)
"""
import logging
import sys
from termcolor import colored
class ColorStreamHandler(logging.StreamHandler):
    """
    StreamHandler that prints color. This is used by the console client.

    The is_tty property header was reconstructed; it had been garbled by
    extraction artifacts.
    """

    # Map of log level -> (termcolor color name, attribute list).
    level_map = {
        logging.DEBUG: ('magenta', ['bold']),
        logging.INFO: ('cyan', ['bold']),
        logging.WARNING: ('yellow', ['bold']),
        logging.ERROR: ('red', ['bold']),
        logging.CRITICAL: ('red', ['bold', 'reverse'])
    }

    @property
    def is_tty(self):
        """is the stream a tty?"""
        isatty = getattr(self.stream, 'isatty', None)
        return isatty and isatty()

    def emit(self, record):
        # Colorize only when writing to a terminal and the module-level
        # 'console' logger exists and has opted in via its colorize attribute.
        colorize = 'console' in globals() and getattr(console, 'colorize', False)
        if self.is_tty and colorize:
            color, attr = self.level_map[record.levelno]
            prefix = colored(str('[' + record.levelname + ']').ljust(18), color, attrs=attr)
            if hasattr(record, 'highlight') and record.highlight:
                record.msg = colored(record.msg, color, attrs=['bold', 'reverse'])
        else:
            prefix = str('[' + record.levelname + ']').ljust(18)
        # NOTE(review): mutates record.msg in place, which affects any other
        # handlers attached to the same logger -- preserved as-is.
        record.msg = prefix + record.msg
        logging.StreamHandler.emit(self, record)
class ConsoleLogger(logging.Logger):
    """Log to the console with some color decorations."""
    def __init__(self, name):
        super(ConsoleLogger, self).__init__(name)
        # Emit everything; the colored handler writes to stdout.
        self.setLevel(logging.DEBUG)
        self.addHandler(ColorStreamHandler(sys.stdout))
# Remember whichever logger class is currently active so it can be restored.
default_logger_class = logging.getLoggerClass()

# Temporarily install ConsoleLogger so the 'zap' logger created here becomes
# a ConsoleLogger instance (colored console output for the CLI).
logging.setLoggerClass(ConsoleLogger)
console = logging.getLogger('zap')

# Put the original logger class back so loggers created later by other
# libraries are not affected by the swap above.
logging.setLoggerClass(default_logger_class)
|
SoftwareKing/zstack-dashboard | zstack_dashboard/api_messages.py | Python | apache-2.0 | 16,024 | 0.000874 | api_names = [
'org.zstack.test.multinodes.APISilentMsg',
'org.zstack.test.identity.FakePolicyAllowMsg',
'org.zstack.test.identity.FakePolicyDenyMsg',
'org.zstack.test.identity.FakePolicyAllowHas2RoleMsg',
'org.zstack.core.config.APIQueryGlobalConfigMsg',
'org.zstack.core.config.APIGetGlobalConfigMsg',
'org.zstack.core.config.APIUpdateGlobalConfigMsg',
'org.zstack.header.query.APIGenerateInventoryQueryDetailsMsg',
'org.zstack.header.query.APIGenerateQueryableFieldsMsg',
'org.zstack.header.allocator.APIGetHostAllocatorStrategiesMsg',
'org.zstack.header.allocator.APIGetCpuMemoryCapacityMsg',
'org.zstack.header.vm.APIUpdateVmInstanceMsg',
'org.zstack.header.vm.APIGetVmAttachableL3NetworkMsg',
'org.zstack.header.vm.APIMigrateVmMsg',
'org.zstack.header.vm.APIStopVmInstanceMsg',
'org.zstack.header.vm.APIChangeInstanceOfferingMsg',
'org.zstack.header.vm.APIGetVmAttachableDataVolumeMsg',
'org.zstack.header.vm.APIQueryVmNicMsg',
'org.zstack.header.vm.APIAttachL3NetworkToVmMsg',
'org.zstack.header.vm.APIDestroyVmInstanceMsg',
'org.zstack.header.vm.APIGetVmMigrationCandidateHostsMsg',
'org.zstack.header.vm.APIQueryVmInstanceMsg',
'org.zstack.header.vm.APIDetachL3NetworkFromVmMsg',
'org.zstack.header.vm.APIRebootVmInstanceMsg',
'org.zstack.header.vm.APICreateVmInstanceMsg',
'org.zstack.header.vm.APIStartVmInstanceMsg',
'org.zstack.header.image.APIChangeImageStateMsg',
'org.zstack.header.image.APIUpdateImageMsg',
'org.zstack.header.image.APIDeleteImageMsg',
'org.zstack.header.image.APICreateDataVolumeTemplateFromVolumeMsg',
'org.zstack.header.image.APICreateRootVolumeTemplateFromRootVolumeMsg',
'org.zstack.header.image.APIQueryImageMsg',
'org.zstack.header.image.APICreateRootVolumeTemplateFromVolumeSnapshotMsg',
'org.zstack.header.image.APIAddImageMsg',
'org.zstack.header.console.APIRequestConsoleAccessMsg',
'org.zstack.header.volume.APIBackupDataVolumeMsg',
'org.zstack.header.volume.APIAttachDataVolumeToVmMsg',
'org.zstack.header.volume.APIUpdateVolumeMsg',
'org.zstack.header.volume.APIQueryVolumeMsg',
'org.zstack.header.volume.APICreateDataVolumeFromVolumeSnapshotMsg',
'org.zstack.header.volume.APICreateDataVolumeFromVolumeTemplateMsg',
'org.zstack.header.volume.APIDetachDataVolumeFromVmMsg',
'org.zstack.header.volume.APICreateDataVolumeMsg',
'org.zstack.header.volume.APIGetDataVolumeAttachableVmMsg',
'org.zstack.header.volume.APIGetVolumeFormatMsg',
'org.zstack.header.volume.APIDeleteDataVolumeMsg',
'org.zstack.header.volume.APICreateVolumeSnapshotMsg',
'org.zstack.header.volume.APIChangeVolumeStateMsg',
'org.zstack.header.apimediator.APIIsReadyToGoMsg',
'org.zstack.header.configuration.APIGenerateApiTypeScriptDefinitionMsg',
'org.zstack.header.configuration.APIDeleteDiskOfferingMsg',
'org.zstack.header.configuration.APIGenerateGroovyClassMsg',
'org.zstack.header.configuration.APIQueryInstanceOfferingMsg',
'org.zstack.header.configuration.APIUpdateInstanceOfferingMsg',
'org.zstack.header.configuration.APICreateInstanceOfferingMsg',
'org.zstack.header.configuration.APIGenerateApiJsonTemplateMsg',
'org.zstack.header.configuration.APICreateDiskOfferingMsg',
'org.zstack.header.configuration.APIDeleteInstanceOfferingMsg',
'org.zstack.header.configuration.APIGenerateSqlVOViewMsg',
'org.zstack.header.configuration.APIGenerateTestLinkDocumentMsg',
'org.zstack.header.configuration.APIGetGlobalPropertyMsg',
'org.zstack.header.configuration.APIChangeInstanceOfferingStateMsg',
'org.zstack.header.configuration.APIGenerateSqlIndexMsg',
'org.zstack.header.configuration.APIQueryDiskOfferingMsg',
'org.zstack.header.configuration.APIGenerateSqlForeignKeyMsg',
'org.zstack.header.configuration.APIUpdateDiskOfferingMsg',
'org.zstack.header.configuration.APIChangeDiskOfferingStateMsg',
'org.zstack.header.storage.primary.APIGetPrimaryStorageTypesMsg',
'org.zstack.header.storage.primary.APIAttachPrimaryStorageToClusterMsg',
'org.zstack.header.storage.primary.APIGetPrimaryStorageCapacityMsg',
'org.zstack.header.storage.primary.APIUpdatePrimaryStorageMsg',
'org.zstack.header.storage.primary.APIQueryPrimaryStorageMsg',
'org.zstack.header.storage.primary.APIChangePrimaryStorageStateMsg',
'org.zstack.header.storage.primary.APISyncPrimaryStorageCapacityMsg',
'org.zstack.header.storage.primary.APIDeletePrimaryStorageMsg',
'org.zstack.header.storage.primary.APIReconnectPrimaryStorageMsg',
'org.zstack.header.storage.primary.APIDetachPrimaryStorageFromClusterMsg',
'org.zstack.header.storage.primary.APIGetPrimaryStorageAllocatorStrategiesMsg',
'org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotTreeMsg',
'org.zstack.header.storage.snapshot.APIDeleteVolumeSnapshotMsg',
'org.zstack.header.storage.snapshot.APIUpdateVolumeSnapshotMsg',
'org.zstack.header.storage.snapshot.APIDeleteVolumeSnapshotFromBackupStorageMsg',
'org.zstack.header.storage.snapshot.APIQueryVolumeSnapshotMsg',
'org.zstack.header.storage.sn | apshot.APIRevertVolumeFromSnapshotMsg',
'org.zstack.header.storage.snapshot.APIBackupVolumeSnapshotMsg',
'org.zstack.header.storage.snapshot.APIGetVolumeSnapshotTreeMsg',
'org.zstack.header.storage.backup.APIQueryBackupStorageMsg',
| 'org.zstack.header.storage.backup.APIAttachBackupStorageToZoneMsg',
'org.zstack.header.storage.backup.APIGetBackupStorageTypesMsg',
'org.zstack.header.storage.backup.APIChangeBackupStorageStateMsg',
'org.zstack.header.storage.backup.APIScanBackupStorageMsg',
'org.zstack.header.storage.backup.APIGetBackupStorageCapacityMsg',
'org.zstack.header.storage.backup.APIDetachBackupStorageFromZoneMsg',
'org.zstack.header.storage.backup.APIUpdateBackupStorageMsg',
'org.zstack.header.storage.backup.APIDeleteBackupStorageMsg',
'org.zstack.header.network.l3.APIAddDnsToL3NetworkMsg',
'org.zstack.header.network.l3.APICreateL3NetworkMsg',
'org.zstack.header.network.l3.APIGetFreeIpMsg',
'org.zstack.header.network.l3.APIUpdateL3NetworkMsg',
'org.zstack.header.network.l3.APIDeleteIpRangeMsg',
'org.zstack.header.network.l3.APIChangeL3NetworkStateMsg',
'org.zstack.header.network.l3.APIAddIpRangeMsg',
'org.zstack.header.network.l3.APIGetL3NetworkTypesMsg',
'org.zstack.header.network.l3.APIAddIpRangeByNetworkCidrMsg',
'org.zstack.header.network.l3.APIQueryIpRangeMsg',
'org.zstack.header.network.l3.APIRemoveDnsFromL3NetworkMsg',
'org.zstack.header.network.l3.APIGetIpAddressCapacityMsg',
'org.zstack.header.network.l3.APIDeleteL3NetworkMsg',
'org.zstack.header.network.l3.APIUpdateIpRangeMsg',
'org.zstack.header.network.l3.APIQueryL3NetworkMsg',
'org.zstack.header.network.service.APIAttachNetworkServiceToL3NetworkMsg',
'org.zstack.header.network.service.APIAddNetworkServiceProviderMsg',
'org.zstack.header.network.service.APIQueryNetworkServiceL3NetworkRefMsg',
'org.zstack.header.network.service.APIAttachNetworkServiceProviderToL2NetworkMsg',
'org.zstack.header.network.service.APIDetachNetworkServiceProviderFromL2NetworkMsg',
'org.zstack.header.network.service.APIQueryNetworkServiceProviderMsg',
'org.zstack.header.network.service.APIGetNetworkServiceTypesMsg',
'org.zstack.header.network.l2.APIAttachL2NetworkToClusterMsg',
'org.zstack.header.network.l2.APIQueryL2VlanNetworkMsg',
'org.zstack.header.network.l2.APICreateL2VlanNetworkMsg',
'org.zstack.header.network.l2.APIDetachL2NetworkFromClusterMsg',
'org.zstack.header.network.l2.APIDeleteL2NetworkMsg',
'org.zstack.header.network.l2.APICreateL2NoVlanNetworkMsg',
'org.zstack.header.network.l2.APIUpdateL2NetworkMsg',
'org.zstack.header.network.l2.APIGetL2NetworkTypesMsg',
'org.zstack.header.network.l2.APIQueryL2NetworkMsg',
'org.zstack.header.search.APIDeleteSearchIndexMsg',
'org.zstack.header.search.APISearchGenerateSqlTriggerMsg',
'org.zstack.header.search.APICreateSearchIndexMsg',
'org.zstack.header.tag.API |
phantomnat/my-cf-client-python | lib/cfclient/ui/tabs/AITab.py | Python | gpl-2.0 | 12,600 | 0.004286 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This tab plots different logging data defined by configurations that has been
pre-configured.
"""
__author__ = 'Bitcraze AB'
__all__ = ['AITab']
import glob
import json
import logging
import os
import sys
logger = logging.getLogger(__name__)
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import pyqtSlot, pyqtSignal, QThread, Qt
from PyQt4.QtGui import QMessageBox
from PyQt4.QtGui import QApplication, QStyledItemDelegate, QAbstractItemView
from PyQt4.QtCore import QAbstractItemModel, QModelIndex, QString, QVariant
from pprint import pprint
import datetime
from cfclient.ui.widgets.plotwidget import PlotWidget
from cflib.crazyflie.log import Log
from cfclient.utils.controller import Controller
from cfclient.utils.planner import Planner
from cfclient.ui.tab import Tab
import threading
import socket
import SocketServer
import time
# import numpy
# import pylab
plot_tab_class = uic.loadUiType(sys.path[0] +
"/cfclient/ui/tabs/AITab.ui")[0]
class AITab(Tab, plot_tab_class):
"""Tab for plotting logging data"""
_log_data_signal = pyqtSignal(int, object)
_log_error_signal = pyqtSignal(object, str)
_disconnected_signal = pyqtSignal(str)
colors = ['g', 'b', 'm', 'r', 'y', 'c']
def __init__(self, tabWidget, helper, *args):
super(AITab, self).__init__(*args)
self.setupUi(self)
self.tabName = "AI"
self.menuName = "AI"
# self._plot = PlotWidget(fps=30)
# self._plot2 = PlotWidget(fps=30)
self._x_plot = PlotWidget(fps=30)
self._y_plot = PlotWidget(fps=30)
self._z_plot = PlotWidget(fps=30)
self._roll_plot = PlotWidget(fps=30)
self._pitch_plot = PlotWidget(fps=30)
# Check if we could find the PyQtImport. If not, then
# set this tab as disabled
self.enabled = self._x_plot.can_enable
# self.dataSelector.setModel(self._model)
# self._log_data_signal.connect(self._log_data_received)
self.tabWidget = tabWidget
self.helper = helper
# self.plotLayout.addWidget(self._plot)
self.plotX.addWidget(self._x_plot)
self.plotY.addWidget(self._y_plot)
self.plotZ.addWidget(self._z_plot)
#
# self.plotXOutput.addWidget(self._roll_plot)
# self.plotYOutput.addWidget(self._pitch_plot)
# self.plotLayout2.addWidget(self._plot2)
self._previous_config = None
self._started_previous = False
# initial TCP Server thread
# from cfclient.utils.tcpserver import TcpServerThread
self.controller = Controller(helper.cf)
# self.controller.start(QThread.HighestPriority)
self.controller.start(QThread.HighestPriority)
self.planner = Planner(self.controller)
self.planner.start(QThread.HighestPriority)
self.last_time = time.clock()
self._plot_time_count = 0
# self.TcpServer.ClientConnected.add_callback(self._client_connected)
# self.controller = Controller(self, helper.cf)
# helper.inputDeviceReader.input_updated.add_callback(self.controller.update_thrust)
helper.inputDeviceReader.autofly_updated.add_callback(self.controller.set_auto_fly)
# # self.controller.PlotUpdated.connect(self._data_received)
#
#
# self.tcp_server = TcpServerThread()
# self.tcp_server.data_received.add_callback(self.controller.update_input)
# self.tcp_server.data_received.add_callback(self._data_received)
#
# self.tcp_server.start()
# # self.controller.PIDDataUpdated.connect(self._pid_data_received)
#
# self.dsbRollKP.setValue(self.controller._x_kp)
# self.dsbRollKI.setValue(self.controller._x_ki)
# self.dsbRollKD.setValue(self.controller._x_kd)
# self.dsbRollKP.valueChanged.connect(self.controller._x_kp_changed)
# self.dsbRollKI.valueChanged.connect(self.controller._x_ki_changed)
# self.dsbRollKD.valueChanged.connect(self.controller._x_kd_changed)
#
# self.sbThrust.setValue(self.controller._thrust)
# self.sbThrust.valueChanged.connect(self.controller.set_thrust)
#
# self.btnStart.clicked.connect(self.controller.control_en_toggle)
#
# self.dsbPitchKP.setValue(self.controller._y_kp)
# self.dsbPitchKI.setValue(self.controller._y_ki)
# self.dsbPitchKD.setValue(self.controller._y_kd)
# self.dsbPitchKP.valueChanged.connect(self.controller._y_kp_changed)
# self.dsbPitchKI.valueChanged.connect(self.controller._y_ki_changed)
# self.hsYTargetPos.valueChanged.connect(self.controller.set_target_y)
self.controller.set_target_x(self.hsXTargetPos.value())
self.controller.set_target_y(self.hsYTargetPos.value())
self.controller.set_target_z(self.hsZTargetPos.value())
# self.controller.DepthUpdated.connect(self.lblDepth.setText)
self.hsXTargetPos.valueChanged.connect(self.controller.set_target_x)
self.hsYTargetPos.valueChanged.connect(self.controller.set_target_y)
self.hsZTargetPos.valueChanged.connect(self.controller.set_target_z)
self.btnBgSub.released.connect(self.controller.background_subtraction)
self.btnObsFind.released.connect(self.controller.find_obstacle)
self.btnShowBg.released.connect(self.controller.show_bg)
self.btnTakeOff.released.connect(self.planner.take_off)
self.btnLanding.released.connect(self.planner.landing)
self.btnA2B.released.connect(self.planner.task_a2b)
self.btnTask3.released.connect(self.planner.task3)
self.btnPathPlan.released.connect(self.planner.path_planning)
self.btnFollowPath.released.connect(self.planner.task4)
# self.sliderThImage.valueChanged.connect(self.controller.change_th)
self.controller.ImageUpdated.connect(self._slot_image_updated)
self.c | ontroller.PositionUpdated.connect(self._data_received)
self.controller.OutputUpdated.connect(self._output_controller_updated)
self.controller.ErrorUpdated.connect(self._slot_error_updated)
self.btnErrorRe | set.released.connect(self.controller.slot_reset_error)
#
#
# self._plot.set_title('Position')
# color_selector = 0
# self._plot.add_curve('actual.x', self.colors[color_selector % len(self.colors)])
# color_selector += 1
# self._plot.add_curve('actual.y', self.colors[color_selector % len(self.colors)])
# color_selector += 1
# self._plot.add_curve('actual.z', self.colors[color_selector % len(self.colors)])
#
# #
# color_selector += 1
# self._plot.add_curve('target.x', self.colors[color_selector % len(self.colors)])
# color_selector += 1
# self._plot.add_curve('target.y', self.colors[color_selector % len(self.colors)])
# color_selector += 1
# self._plot.add_curve('target.z', self.colors[color_selector % len(self.colors)])
|
tuukkao/sananmuunnos | sananmuunnos.py | Python | mit | 4,465 | 0.007427 | #!/usr/bin/envpython
#-*- coding: utf-8 -*-
"""
Sananmuunnos: Transforming Finnish spoonerisms made easy (and systematic).
"""
__author__ = "Tuukka Ojala"
__email__ = "tuukka.ojala@gmail.com"
__version__ = "2015.0918"
__license__ = "MIT"
import re
# Regular expressions for detecting the different sananmuunnos word shapes.

# Double vowel: an optional leading consonant followed by two identical
# vowels (e.g. "maali", "aamu").
_double_vowel = re.compile(r"^[^aeiouyäö]?([aeiouyäö])\1")
# Initial vowel: the word simply begins with a vowel.
_initial_vowel = re.compile(r"^[aeiouyäö]")
# Initial consonant: a consonant, a vowel, and then one more character that
# is NOT a repeat of that vowel.  The previous pattern used [^\1], but a
# backreference has no meaning inside a character class (it matches
# "anything except the literal byte \x01"); a negative lookahead expresses
# the documented intent correctly.
_initial_consonant = re.compile(r"^[^aeiouyäö]([aeiouyäö])(?!\1).")
# Matches any single (Finnish) vowel.
_vowel = re.compile(r"[aeiouyäö]")
def _is_double_vowel(word1, word2):
"""Test word1 and word2 against the "double vowel" rule."""
match = _double_vowel.search(word2)
if match:
vowel1 = _vowel.search(word1)
vowel2 = _vowel.search(word2)
initial1 = word1[:vowel1.start() +1] + word1[vowel1.start()]
initial2 = word2[:vowel2.start() +1]
transformed1 = initial2 +word1[vowel1.end():]
transformed2 = initial1 + word2[vowel2.end() +1:]
return (transformed1, transformed2)
else:
return False
def _is_initial_vowel(word1, word2):
    """Apply the "initial vowel" rule: word1 must begin with a vowel.

    The two-character head of word2 is grafted onto word1 (minus its first
    letter) and word1's first letter replaces word2's two-character head.
    Returns the transformed pair, or False when the rule does not apply.
    """
    if _initial_vowel.search(word1):
        transformed1 = word2[:2] + word1[1:]
        transformed2 = word1[0] + word2[2:]
        return (transformed1, transformed2)
    else:
        return False
def _is_initial_consonant(word1, word2):
    """Test word1 and word2 against the "initial consonant" rule:
    word1 begins with a consonant followed by a vowel.

    Swaps the two-character heads of the words and returns the pair,
    or False when the rule does not apply to word1.
    """
    if _initial_consonant.search(word1):
        transformed1 = word2[:2] +word1[2:]
        transformed2 = word1[:2] +word2[2:]
        return (transformed1, transformed2)
    else:
        return False
def _vowel_harmony(word):
    """Force *word* to obey Finnish vowel harmony.

    The class of the word's first vowel wins: when it is one of a/o/u,
    every ä/ö/y is replaced by its a/o/u counterpart; when it is one of
    y/ä/ö, the replacement runs the other way.  Words whose first vowel
    is neutral (e or i), or which contain no vowel, are left untouched.
    """
    first = _vowel.search(word)
    if not first:
        return word
    leader = word[first.start()]
    if leader in "aou":
        for src, dst in (("ä", "a"), ("ö", "o"), ("y", "u")):
            word = word.replace(src, dst)
    elif leader in "yäö":
        for src, dst in (("u", "y"), ("a", "ä"), ("o", "ö")):
            word = word.replace(src, dst)
    return word
def _test(transformation, word1, word2):
    """Run *transformation* on the word pair, retrying with the words
    swapped when the first attempt fails.

    Returns the transformed words (always in the caller's original order)
    or False when the transformation is not applicable either way.
    """
    direct = transformation(word1, word2)
    if direct:
        return direct
    swapped = transformation(word2, word1)
    if swapped:
        # Undo the argument swap so word1's replacement comes first.
        return (swapped[1], swapped[0])
    return swapped
def transform(words):
    """Make a sananmuunnos ("word transformation") out of the given words.

    The first and the last word of *words* are transformed against each
    other; any words in between pass through untouched.  Returns the
    resulting string, or None when fewer than two words are given or no
    transformation rule applies.
    """
    words = words.lower()
    words_list = words.split(" ")
    if len(words_list) < 2:
        return None
    word1 = words_list[0]
    word2 = words_list[-1]
    transformed = None
    for transformation in _transformations:
        transformed = _test(transformation, word1, word2)
        if transformed:
            break
    if not transformed:
        # No rule matched in either word order; the docstring promises None
        # (previously this fell through and crashed unpacking False).
        return None
    word1, word2 = transformed
    word1 = _vowel_harmony(word1)
    word2 = _vowel_harmony(word2)
    # Reassemble: transformed head, untouched middle words, transformed tail.
    # (Joining the parts as a flat list avoids the double space the old code
    # produced for two-word input.)
    return " ".join([word1] + words_list[1:-1] + [word2])
# Transformation rules tried by transform(), in priority order: the
# double-vowel rule first, then initial-vowel, then initial-consonant.
_transformations = [_is_double_vowel, _is_initial_vowel, _is_initial_consonant]
# Command-line entry point: spoonerize the words given as arguments.
if __name__ == "__main__":
    import sys
    if len(sys.argv) < 3:
        # At least two words are needed to build a sananmuunnos.
        print("Usage: {} word1 word2 [...]".format(sys.argv[0]))
    else:
        print(transform(" ".join(sys.argv[1:])))
|
Jet-Streaming/gyp | test/actions/src/subdir1/counter.py | Python | bsd-3-clause | 1,151 | 0.010426 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import time
output = sys.argv[1]
persistoutput = "%s.persist" % sys.argv[1]

# Read the previous count from the hidden persist file, if it exists.
count = 0
try:
  with open(persistoutput, 'r') as f:
    count = f.read()
except (IOError, OSError):
  # First run: no persist file yet, start counting from 0.
  pass
count = int(count) + 1

# An optional second argument caps the counter at a maximum value.
if len(sys.argv) > 2:
  max_count = int(sys.argv[2])
  if count > max_count:
    count = max_count

oldcount = 0
try:
  with open(output, 'r') as f:
    oldcount = f.read()
except (IOError, OSError):
  pass

# Save the count in a file that is undeclared, and thus hidden, to gyp. We
# need to do this because, prior to running commands, some build systems
# delete any declared outputs, so we would lose our count if we just wrote
# to the given output file.
with open(persistoutput, 'w') as f:
  f.write('%d' % (count))

# Only write the given output file if the count has changed.
if int(oldcount) != count:
  with open(output, 'w') as f:
    f.write('%d' % (count))

# Sleep so the next run changes the file time sufficiently to make the build
# detect the file as changed.
time.sleep(1)
sys.exit(0)
|
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Doc/includes/sqlite3/complete_statement.py | Python | gpl-2.0 | 670 | 0 | # A minimal SQLite shell for experiments
import sqlite3
con = sqlite3.connect(":memory:")
con.isolation_level = None
cur = con.cursor()
buffer = ""
print "Enter your SQL commands to execute in sqlite3."
print "Enter a blank line to exit."
while True:
line = raw_input()
if line == "":
break
buffer += line
if sqlite3.complete_statement(buffer):
try:
buffer | = buffer.strip()
cur.execute(buffer)
if buffer.lstrip().upper().startswith("SELECT"):
print cur.fetchall()
except sqlite3.Error, e:
print "An error occurred:", e.ar | gs[0]
buffer = ""
con.close()
|
ShovanSarker/sense_v4_withLocal | template_manager/views.py | Python | gpl-2.0 | 113,348 | 0.00532 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from subscriber.models import Consumer, ConsumerType, Recharge, TotalRecharge, ACL
from product.models import Product
from voice_records.models import VoiceRecord, VoiceReg
from sms.models import SMSPayment
# from local_lib.v3 import is_number, is_float
from local_lib.v3 import is_number, is_float, is_bangladeshi_number, is_japanese_number, send_sms
from transaction.models import Transaction, ProductsInTransaction, BuyerSellerAccount, dueTransaction
from shop_inventory.models import Inventory, BuySellProfitInventoryIndividual, BuySellProfitInventory
from transcriber_management.models import Transcriber, TranscriberInTranscription, FailedTranscription
import datetime
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.sessions.backends.db import SessionStore
from django.db.models import Count
@csrf_exempt
def login_page(request):
    """Render the bare login form."""
    return render(request, 'pages/login.html')
@csrf_exempt
def login_auth(request):
    """Authenticate a POSTed username/password pair.

    On success the user is logged in and redirected ('/admin' for
    superusers, '/' otherwise); on failure the login page is re-rendered
    with an explanatory message.  CORS headers are attached to every
    response.
    """
    postdata = request.POST
    # NOTE: the previous condition was "'username' and 'password' in
    # postdata", which due to operator precedence only ever tested
    # 'password'; a missing 'username' then crashed below with KeyError.
    # (The debug prints were dropped as well - they wrote raw credentials
    # to the server log.)
    if 'username' in postdata and 'password' in postdata:
        login_username = postdata['username']
        # Accounts keyed by phone number are stored by their 9-digit suffix.
        if ACL.objects.filter(loginID=login_username[-9:]).exists():
            login_username = login_username[-9:]
        user = authenticate(username=login_username, password=postdata['password'])
        if user is not None:
            if user.is_active:
                login(request, user)
                request.session['user'] = login_username
                if user.is_superuser:
                    res = redirect('/admin')
                else:
                    res = redirect('/')
            else:
                res = render(request, 'pages/login.html',
                             {'wrong': True,
                              'text': 'The password is valid, but the account has been disabled!'})
        else:
            res = render(request, 'pages/login.html',
                         {'wrong': True,
                          'text': 'The username and password you have entered is not correct. Please retry'})
    else:
        res = render(request, 'pages/login.html', {'wrong': False})
    res['Access-Control-Allow-Origin'] = "*"
    res['Access-Control-Allow-Headers'] = "Origin, X-Requested-With, Content-Type, Accept"
    res['Access-Control-Allow-Methods'] = "PUT, GET, POST, DELETE, OPTIONS"
    return res
def logout_now(request):
    """Log the current user out and show the login page again."""
    logout(request)
    return render(request, 'pages/login.html')
@login_required(login_url= | '/login/')
def home(request):
transcriber_name = request.session['user']
print request.session['user']
if ACL.objects.filter(loginID=transcriber_name).exists():
login_user = ACL.objects.get(loginID=transcriber_name)
print(login_user.loginUser.name)
transcriber_name = login_user.loginUser.name
if login_user.loginUser.type.type_name == 'Distributor':
if login_user.loginUser.number_of_child == 'CHANGED !!!':
return render(request, 'pages/Distributor/index.html', {'transcriber_name': transcriber_name})
else:
return redirect('/change_password/')
elif login_user.loginUser.type.type_name == 'SR':
if login_user.loginUser.number_of_child == 'CHANGED !!!':
return render(request, 'pages/SR/index.html', {'transcriber_name': transcriber_name})
else:
return redirect('/change_password/')
elif login_user.loginUser.type.type_name == 'Seller':
if login_user.loginUser.number_of_child == 'CHANGED !!!':
return render(request, 'pages/Shop/index.html', {'transcriber_name': transcriber_name})
else:
return redirect('/change_password/')
elif login_user.loginUser.type.type_name == 'Buyer':
if login_user.loginUser.number_of_child == 'CHANGED !!!':
return render(request, 'pages/Consumer/index.html', {'transcriber_name': transcriber_name})
else:
return redirect('/change_password/')
else:
number_of_reg_calls = VoiceReg.objects.filter().count()
number_of_transaction_calls = VoiceRecord.objects.filter().count()
total = number_of_reg_calls + number_of_transaction_calls
if total > 0:
reg_call_percentage = (number_of_reg_calls / float(total)) * 100
transaction_call_percentage = (number_of_transaction_calls / float(total)) * 100
else:
transaction_call_percentage = 0
reg_call_percentage = 0
today_month = datetime.date.today().month
today_year = datetime.date.today().year
count = 1
data_2 = ''
data_3 = ''
data_4 = ''
data_5 = ''
data_6 = ''
max = 0
max_table_2 = 0
total_sell = VoiceRecord.objects.filter(purpose='sell').count()
total_buy = VoiceRecord.objects.filter(purpose='buy').count()
total_money_transaction = SMSPayment.objects.filter().count()
total_for_chart2 = number_of_reg_calls + number_of_transaction_calls
if total_for_chart2 > 0:
sell_percentage = (total_sell / float(total_for_chart2)) * 100
buy_percentage = (total_buy / float(total_for_chart2)) * 100
money_transaction_percentage = (total_money_transaction / float(total_for_chart2)) * 100
else:
sell_percentage = 0
buy_percentage = 0
money_transaction_percentage = 0
while count < 32:
total_call_that_day = VoiceRecord.objects.filter(DateAdded__month=today_month,
DateAdded__year=today_year, DateAdded__day=count).count()
total_reg_that_day = VoiceReg.objects.filter(DateAdded__month=today_month,
DateAdded__year=today_year, DateAdded__day=count).count()
if max < total_call_that_day:
max = total_call_that_day + 2
if max < total_reg_that_day:
max = total_reg_that_day + 2
data_2 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, count, total_call_that_day)
data_3 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, count, total_reg_that_day)
total_buy_that_day = VoiceRecord.objects.filter(DateAdded__month=today_month,
DateAdded__year=today_year,
DateAdded__day=count,
purpose='buy').count()
total_sell_that_day = VoiceRecord.objects.filter(DateAdded__month=today_month,
DateAdded__year=today_year,
DateAdded__day=count,
purpose='sell').count()
total_payment_that_day = SMSPayment.objects.filter(DateAdded__month=today_month,
DateAdded__year=today_year,
DateAdded__day=count).count()
if max_table_2 < total_buy_that_day:
max_table_2 = total_buy_that_day + 2
if max_table_2 < total_sell_that_day:
max_table_2 = total_sell_that_day + 2
if max_table_2 < total_payment_that_day:
max_table_2 = total_payment_that_day + 2
data_4 += '[gd(%s, %s, %s), %s],' % (today_year, today_month, count, total_buy_that_day)
data_5 += '[gd(%s, %s, %s), %s],' % (today_year, today_ |
miqui/python-hpOneView | examples/scripts/get-profiles.py | Python | mit | 4,576 | 0.001311 | #!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
| from __f | uture__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
    """Accept the appliance EULA (answering 'no' to support data
    collection) if it has not been accepted yet.

    Errors are printed and swallowed so a failed EULA check does not
    abort the login flow.  The previous version also issued an extra
    get_eula_status() call outside the try block whose result was
    discarded and whose exceptions went uncaught; that call is gone.
    """
    try:
        # get_eula_status() returns True while the EULA still needs accepting.
        if con.get_eula_status() is True:
            print('EULA display needed')
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)
def login(con, credential):
    """Log in to the appliance with the given credentials.

    A failed login is reported on stdout but not re-raised, keeping the
    original best-effort behaviour.
    """
    try:
        con.login(credential)
    except Exception:
        # The previous bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps best-effort without doing that.
        print('Login failed')
def get_all_profiles(srv):
    """Pretty-print every server profile known to the appliance."""
    for server_profile in srv.get_server_profiles():
        pprint(server_profile)
def get_profile_by_name(srv, name):
    """Locate the server profile called *name* and pretty-print it.

    Prints a "not found" notice when no profile matches.
    """
    match = next((p for p in srv.get_server_profiles()
                  if p['name'] == name), None)
    if match is None:
        print('Profile: ', name, ' not found')
        return
    print(('Getting Profile %s' % match['name']))
    pprint(match)
def main():
    """Parse the command line, connect to the HP OneView appliance, log in,
    and display either all server profiles (-g) or one profile by name (-n).
    """
    parser = argparse.ArgumentParser(add_help=True,
                        formatter_class=argparse.RawTextHelpFormatter,
                        description='''
    Display Server Profiles

    Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
    HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
    HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
    HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
    Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
    Proxy (host:port format''')
    # -n and -g are mutually exclusive: show one profile or all of them.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-n', dest='name',
                       help='''
    Name of the server profile to get''')
    group.add_argument('-g', dest='get_all',
                       action='store_true',
                       help='''
    Get ALL server profiles and exit''')

    args = parser.parse_args()
    credential = {'userName': args.user, 'password': args.passwd}

    # A single connection object is shared by the per-area API helpers.
    con = hpov.connection(args.host)
    srv = hpov.servers(con)
    net = hpov.networking(con)
    sts = hpov.settings(con)

    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)

    login(con, credential)
    acceptEULA(con)

    if args.get_all:
        get_all_profiles(srv)
        sys.exit()

    get_profile_by_name(srv, args.name)
if __name__ == '__main__':
    # sys and argparse are only needed when run as a script, so they are
    # imported here rather than at module scope; main() sees them as
    # module globals once this block has run.
    import sys
    import argparse
    sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
kernc/pelican | pelican/tests/default_conf.py | Python | agpl-3.0 | 1,314 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function

# Pelican configuration used by the test suite's demo blog.
AUTHOR = 'Alexis Métaireau'
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org'
TIMEZONE = 'UTC'

GITHUB_URL = 'http://github.com/ametaireau/'
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
DEFAULT_PAGINATION = 2

FEED_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'

LINKS = (('Biologeek', 'http://biologeek.org'),
         ('Filyb', "http://filyb.info/"),
         ('Libert-fr', "http://www.libert-fr.com"),
         ('N1k0', "http://prendreuncafe.com/blog/"),
         ('Tarek Ziadé', "http://ziade.org/blog"),
         ('Zubin Mithra', "http://zubin71.wordpress.com/"),)

SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
          ('lastfm', 'http://lastfm.com/user/akounet'),
          ('github', 'http://github.com/ametaireau'),)

# global metadata to all the contents
DEFAULT_METADATA = {'yeah': 'it is'}

# path-specific metadata
EXTRA_PATH_METADATA = {
    'extra/robots.txt': {'path': 'robots.txt'},
}

# static paths will be copied without parsing their contents
STATIC_PATHS = [
    'pictures',
    'extra/robots.txt',
]

# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
foobar = "barbaz"
|
z-uo/pixeditor | dock_tools.py | Python | gpl-3.0 | 2,923 | 0.004105 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
from PyQt4 import QtCore
from PyQt4 import Q | tGui
from widget import Button, Label
class ToolsWidget(QtGui.QWidget):
    """Vertical toolbar holding the drawing-tool buttons.

    Exactly one tool is active at a time: clicking a button stores the
    tool name in ``project.tool``, checks that button, unchecks all the
    others and emits ``project.toolChangedSign``.
    """

    def __init__(self, project):
        QtGui.QWidget.__init__(self)
        self.project = project
        ### coordinates readout ###
        self.coords = Label("Cursor coordinates")
        self.coords.setText("x\ny")
        ### tools buttons ###
        self.penB = Button("pen (1)", "icons/tool_pen.png", self.penClicked, True)
        self.penB.setChecked(True)  # pen is the default tool
        self.project.toolSetPenSign.connect(self.penClicked)
        self.pipetteB = Button("pipette (2)", "icons/tool_pipette.png", self.pipetteClicked, True)
        self.fillB = Button("fill (3)", "icons/tool_fill.png", self.fillClicked, True)
        self.moveB = Button("move (4)", "icons/tool_move.png", self.moveClicked, True)
        self.selectB = Button("select (5)", "icons/tool_select.png", self.selectClicked, True)
        ### Layout ###
        layout = QtGui.QVBoxLayout()
        layout.setSpacing(0)
        layout.addWidget(self.coords)
        layout.addWidget(self.penB)
        layout.addWidget(self.pipetteB)
        layout.addWidget(self.fillB)
        layout.addWidget(self.moveB)
        layout.addWidget(self.selectB)
        layout.addStretch()
        layout.setContentsMargins(6, 0, 6, 0)
        self.setLayout(layout)

    def _setTool(self, tool, button):
        """Make *tool* current: check *button*, uncheck the rest, notify."""
        self.project.tool = tool
        for b in (self.penB, self.pipetteB, self.fillB, self.moveB,
                  self.selectB):
            b.setChecked(b is button)
        self.project.toolChangedSign.emit()

    def penClicked(self):
        self._setTool("pen", self.penB)

    def pipetteClicked(self):
        self._setTool("pipette", self.pipetteB)

    def fillClicked(self):
        self._setTool("fill", self.fillB)

    def moveClicked(self):
        self._setTool("move", self.moveB)

    def selectClicked(self):
        self._setTool("select", self.selectB)
|
kaushik94/sympy | sympy/physics/mechanics/models.py | Python | bsd-3-clause | 6,458 | 0.000465 | #!/usr/bin/env python
"""This module contains some sample symbolic models used for testing and
examples."""
# Internal imports
from sympy.core import backend as sm
import sympy.physics.mechanics as me
|
def multi_mass_spring_damper(n=1, apply_gravity=False,
                             apply_external_forces=False):
    r"""Returns a system containing the symbolic equations of motion and
    associated variables for a simple multi-degree of freedom point mass,
    spring, damper system with optional gravitational and external
    specified forces. For example, a two mass system under the influence of
    gravity and external forces looks like:
    ::
        ----------------
        | | | | g
        \ | | | V
        k0 / --- c0 |
        | | | x0, v0
        --------- V
        | m0 | -----
        --------- |
        | | | |
        \ v | | |
        k1 / f0 --- c1 |
        | | | x1, v1
        --------- V
        | m1 | -----
        ---------
        | f1
        V
    Parameters
    ----------
    n : integer
        The number of masses in the serial chain.
    apply_gravity : boolean
        If true, gravity will be applied to each mass.
    apply_external_forces : boolean
        If true, a time varying external force will be applied to each mass.
    Returns
    -------
    kane : sympy.physics.mechanics.kane.KanesMethod
        A KanesMethod object.
    """
    # One symbol per mass for the physical parameters ...
    mass = sm.symbols('m:{}'.format(n))
    stiffness = sm.symbols('k:{}'.format(n))
    damping = sm.symbols('c:{}'.format(n))
    acceleration_due_to_gravity = sm.symbols('g')
    # ... and per-mass generalized coordinates, speeds and specified forces.
    coordinates = me.dynamicsymbols('x:{}'.format(n))
    speeds = me.dynamicsymbols('v:{}'.format(n))
    specifieds = me.dynamicsymbols('f:{}'.format(n))
    ceiling = me.ReferenceFrame('N')
    origin = me.Point('origin')
    origin.set_vel(ceiling, 0)
    points = [origin]
    kinematic_equations = []
    particles = []
    forces = []
    for i in range(n):
        # Each mass hangs from the previous one, so x[i] is a relative
        # displacement along the ceiling's x axis.
        center = points[-1].locatenew('center{}'.format(i),
                                      coordinates[i] * ceiling.x)
        center.set_vel(ceiling, points[-1].vel(ceiling) +
                       speeds[i] * ceiling.x)
        points.append(center)
        block = me.Particle('block{}'.format(i), center, mass[i])
        kinematic_equations.append(speeds[i] - coordinates[i].diff())
        total_force = (-stiffness[i] * coordinates[i] -
                       damping[i] * speeds[i])
        try:
            total_force += (stiffness[i + 1] * coordinates[i + 1] +
                            damping[i + 1] * speeds[i + 1])
        except IndexError:  # no force from below on last mass
            pass
        if apply_gravity:
            total_force += mass[i] * acceleration_due_to_gravity
        if apply_external_forces:
            total_force += specifieds[i]
        forces.append((center, total_force * ceiling.x))
        particles.append(block)
    kane = me.KanesMethod(ceiling, q_ind=coordinates, u_ind=speeds,
                          kd_eqs=kinematic_equations)
    kane.kanes_equations(particles, forces)
    return kane
def n_link_pendulum_on_cart(n=1, cart_force=True, joint_torques=False):
    r"""Returns the system containing the symbolic first order equations of
    motion for a 2D n-link pendulum on a sliding cart under the influence of
    gravity.
    ::
        |
        o y v
        \ 0 ^ g
        \ |
        --\-|----
        | \| |
        F-> | o --|---> x
        | |
        ---------
        o o
    Parameters
    ----------
    n : integer
        The number of links in the pendulum.
    cart_force : boolean, default=True
        If true an external specified lateral force is applied to the cart.
    joint_torques : boolean, default=False
        If true joint torques will be added as specified inputs at each
        joint.
    Returns
    -------
    kane : sympy.physics.mechanics.kane.KanesMethod
        A KanesMethod object.
    Notes
    -----
    The degrees of freedom of the system are n + 1, i.e. one for each
    pendulum link and one for the lateral motion of the cart.
    M x' = F, where x = [u0, ..., un+1, q0, ..., qn+1]
    The joint angles are all defined relative to the ground where the x axis
    defines the ground line and the y axis points up. The joint torques are
    applied between each adjacent link and the between the cart and the
    lower link where a positive torque corresponds to positive angle.
    """
    if n <= 0:
        raise ValueError('The number of links must be a positive integer.')
    # q[0]/u[0] describe the cart's lateral translation; q[1:]/u[1:] are
    # the link angles and angular rates.
    q = me.dynamicsymbols('q:{}'.format(n + 1))
    u = me.dynamicsymbols('u:{}'.format(n + 1))
    if joint_torques is True:
        T = me.dynamicsymbols('T1:{}'.format(n + 1))
    m = sm.symbols('m:{}'.format(n + 1))
    l = sm.symbols('l:{}'.format(n))
    g, t = sm.symbols('g t')
    I = me.ReferenceFrame('I')
    O = me.Point('O')
    O.set_vel(I, 0)
    P0 = me.Point('P0')
    P0.set_pos(O, q[0] * I.x)
    P0.set_vel(I, u[0] * I.x)
    Pa0 = me.Particle('Pa0', P0, m[0])
    frames = [I]
    points = [P0]
    particles = [Pa0]
    forces = [(P0, -m[0] * g * I.y)]
    kindiffs = [q[0].diff(t) - u[0]]
    if cart_force is True or joint_torques is True:
        specified = []
    else:
        specified = None
    for i in range(n):
        # Each link gets its own body-fixed frame, rotated about the
        # inertial z axis by its joint angle.
        Bi = I.orientnew('B{}'.format(i), 'Axis', [q[i + 1], I.z])
        Bi.set_ang_vel(I, u[i + 1] * I.z)
        frames.append(Bi)
        Pi = points[-1].locatenew('P{}'.format(i + 1), l[i] * Bi.y)
        Pi.v2pt_theory(points[-1], I, Bi)
        points.append(Pi)
        Pai = me.Particle('Pa' + str(i + 1), Pi, m[i + 1])
        particles.append(Pai)
        forces.append((Pi, -m[i + 1] * g * I.y))
        if joint_torques is True:
            specified.append(T[i])
            # Torques are applied in equal-and-opposite pairs between
            # adjacent frames (cart frame I for the first joint).
            if i == 0:
                forces.append((I, -T[i] * I.z))
            if i == n - 1:
                forces.append((Bi, T[i] * I.z))
            else:
                forces.append((Bi, T[i] * I.z - T[i + 1] * I.z))
        kindiffs.append(q[i + 1].diff(t) - u[i + 1])
    if cart_force is True:
        F = me.dynamicsymbols('F')
        forces.append((P0, F * I.x))
        specified.append(F)
    kane = me.KanesMethod(I, q_ind=q, u_ind=u, kd_eqs=kindiffs)
    kane.kanes_equations(particles, forces)
    return kane
|
Samsung/ADBI | idk/cachebuilder/dwarftools.py | Python | apache-2.0 | 6,162 | 0.003895 | from elftools.dwarf.ranges import RangeEntry, BaseAddressEntry
from elftools.dwarf import constants
def get_die_offset_by_reference(referer_die, attrname, use_abstract_origin=True):
    '''Return the offset of the DIE referred by the given attribute in the referrer DIE.

    Returns None when the attribute is absent.  Raises NotImplementedError
    for reference forms that cannot be resolved yet and ValueError for any
    non-reference form.
    '''
    ref = referer_die.attributes.get(attrname, None)
    # Fall back to the abstract-origin DIE when the attribute is missing --
    # but never while resolving DW_AT_abstract_origin itself (would recurse).
    if attrname != 'DW_AT_abstract_origin' and ref is None and use_abstract_origin:
        origin_die = get_origin_die(referer_die)
        if origin_die:
            return get_die_offset_by_reference(origin_die, attrname, use_abstract_origin)
    if ref is None:
        return None
    elif ref.form in ('DW_FORM_ref_sig8', 'DW_FORM_ref_addr'):
        # Must be tested before the generic prefix match below: both of
        # these forms also start with 'DW_FORM_ref', so the original order
        # made this branch unreachable and silently misinterpreted them as
        # CU-relative offsets.
        raise NotImplementedError('Type references encoded as %s are not implemented.' % ref.form)
    elif ref.form.startswith('DW_FORM_ref'):
        # Reference to a DIE in the current CU: value is CU-relative.
        return referer_die.cu.cu_offset + ref.value
    else:
        raise ValueError
def get_die_by_reference(referer_die, attrname, use_abstract_origin=True):
    '''Return the DIE referred by the given attribute in the referrer DIE.

    Returns None when the attribute is absent; raises ValueError when the
    resolved offset does not match any DIE in the file.
    '''
    offset = get_die_offset_by_reference(referer_die, attrname, use_abstract_origin)
    if offset is None:
        return None
    # Fast path: the target usually lives in the referrer's own CU.
    for target_die in referer_die.cu.iter_DIEs():
        if target_die.offset == offset:
            return target_die
    # Not in the current CU -- scan every other CU in the file.
    for compilation_unit in referer_die.dwarfinfo.iter_CUs():
        # BUGFIX: a DIE has no cu_offset attribute; the CU offset lives on
        # referer_die.cu, so the original comparison raised AttributeError.
        if compilation_unit.cu_offset == referer_die.cu.cu_offset:
            # We've already searched this CU above.
            continue
        for target_die in compilation_unit.iter_DIEs():
            if target_die.offset == offset:
                return target_die
    raise ValueError
def get_origin_die(die):
    """Return the DIE referenced by DW_AT_abstract_origin, or None."""
    origin_attr = 'DW_AT_abstract_origin'
    return get_die_by_reference(die, origin_attr)
def get_attr_form_val(die, what, use_abstract_origin=True):
    '''Look up attribute *what* on *die* and return a (form, value) pair.

    When the attribute is missing and *use_abstract_origin* is true, the
    lookup is retried on the DW_AT_abstract_origin DIE.  Returns
    (None, None) when nothing is found.
    '''
    attr = die.attributes.get(what)
    if attr is not None:
        return attr.form, attr.value
    if use_abstract_origin:
        origin_die = get_origin_die(die)
        if origin_die:
            return get_attr_form_val(origin_die, what, use_abstract_origin)
    # Everything failed, no value found.
    return None, None
def get_attr_val(die, what, use_abstract_origin=True):
    '''Return only the value part of the given attribute of the given DIE.'''
    _form, value = get_attr_form_val(die, what, use_abstract_origin)
    return value
def iter_ranges(die):
    """Yield merged (low, high) PC ranges covered by *die*.

    Abstract entries of inlined functions and inlined-subroutine
    instances yield nothing; a DIE with neither DW_AT_ranges nor
    DW_AT_low_pc falls back to its parent's ranges.
    """
    def iter_range_list(ranges):
        def iter_pairs():
            # by default addresses are relative to the CU base address
            base = die.cu.get_top_DIE().attributes['DW_AT_low_pc'].value
            for entry in ranges:
                if isinstance(entry, BaseAddressEntry):
                    # BUGFIX: the attribute is spelled base_address.
                    base = entry.base_address
                elif isinstance(entry, RangeEntry):
                    yield base + entry.begin_offset, base + entry.end_offset
                else:
                    raise ValueError('Invalid element in range list.')
        def merge_ranges(ranges):
            '''Yield ranges equivalent to the given ones, but simplified if possible.'''
            next_range = (None, None)
            for low, high in sorted(ranges):
                if next_range[1] == low:
                    # Adjacent to the previous range: extend it.
                    next_range = (next_range[0], high)
                else:
                    if next_range[0] is not None:
                        yield next_range
                    next_range = (low, high)
            if next_range[0] is not None:
                yield next_range
        return merge_ranges(iter_pairs())
    if die.tag == 'DW_TAG_subprogram' and 'DW_AT_inline' in die.attributes:
        if die.attributes['DW_AT_inline'].value in (constants.DW_INL_inlined, constants.DW_INL_declared_inlined):
            # inlined function abstract entry: occupies no addresses itself
            return
    # inlined function instance
    if die.tag == 'DW_TAG_inlined_subroutine':
        return
    if 'DW_AT_ranges' in die.attributes:
        rangelist_offset = die.attributes['DW_AT_ranges'].value
        rl = die.dwarfinfo.range_lists().get_range_list_at_offset(rangelist_offset)
        for low, high in iter_range_list(rl):
            yield low, high
    elif 'DW_AT_low_pc' in die.attributes:
        low = get_attr_val(die, 'DW_AT_low_pc', False)
        high_form, high = get_attr_form_val(die, 'DW_AT_high_pc', False)
        if high is None:
            # BUGFIX: the original ``... or low + 1`` never fired, because
            # the (None, None) tuple is truthy; default to a 1-byte range.
            high = low + 1
        elif high_form.startswith('DW_FORM_data'):
            # Constant-class DW_AT_high_pc encodes an offset from low_pc.
            high += low
        yield low, high
    elif die.get_parent():
        for x in iter_ranges(die.get_parent()):
            yield x
def iter_loclist(loclistptr):
    """Iterate over a DWARF location list (not implemented yet)."""
    raise NotImplementedError
def iter_expressions(die, attr_name='DW_AT_location'):
    '''Yield (low, high, expression) triples for the given DIE attribute.

    For single-expression forms the address bounds are (None, None); for
    location lists each entry carries its own bounds.  A DIE without the
    attribute yields nothing (the KeyError is swallowed below).
    '''
    def get_loclist(ptr):
        # Resolve a section offset into the parsed location list.
        return die.dwarfinfo.location_lists().get_location_list_at_offset(ptr)
    try:
        location_attr = die.attributes[attr_name]
        if location_attr.form == 'DW_FORM_exprloc':
            # Single location expression
            yield None, None, location_attr.value
        elif location_attr.form.startswith('DW_FORM_block'):
            # Single location expression. This form is not legal for location expressions,
            # but GCC uses it anyway...
            yield None, None, location_attr.value
        elif location_attr.form == 'DW_FORM_sec_offset':
            for low, high, expr in get_loclist(location_attr.value):
                yield low, high, expr
        elif location_attr.form.startswith('DW_FORM_data'):
            # Another illegal form for location expressions used by GCC.
            # addresses are relative to cu base address
            cuaddr = die.cu.get_top_DIE().attributes['DW_AT_low_pc'].value
            for low, high, expr in get_loclist(location_attr.value):
                yield cuaddr + low, cuaddr + high, expr
        else:
            raise ValueError('%s form of DW_AT_location is not supported.' % location_attr.form)
    except KeyError:
        # Attribute absent: yield nothing.
        pass
|
markcwill/obspy_ext | antelope/dbobjects.py | Python | gpl-3.0 | 5,306 | 0.007727 | #! /usr/bin/env python
#
# dbobjs.py
#
# obspy antelope dbatabase objects module
# by Mark Williams 2012.013
# Oregon State University
#
# Contains basic classes to interect with (read) data from Antelope
# Datascope database tables into ObsPy using the Antelope Python interface.
#
# These classes load data from the database into python on creation. Once
# created, a db can be closed or destroyed and the data is in python memory.
from obspy_ext.antelope.utils import add_antelope_path
add_antelope_path()
from antelope.datascope import * # all is necessary for db query variables
from obspy.core.util import AttribDict
from numpy import array
class Dbrecord(AttribDict):
"""
Holds one record line from an Antelope Datascope database
Fields can be accessed as attributes, e.g. dbr.sta or keys, dbr['sta']
"""
# These are according to Antelope and the schema of your db
Ptr = Dbptr()
Table = None # string of what table record came from
PrimaryKey = () # tuple of strings of fields in primary key
_fields_unsorted = () # tuple of fields from database record
# IN FIELD NUMBERD ORDER
@property
def Fields(self):
flist = list(self._fields_unsorted)
flist.sort()
return flist
def __init__(self, db=None):
"""
Create a Dbrecord
Pass an open db pointer, or make an empty one to populate.
set every field to its value according to the db, even NULLS.
If there's a problem, you will get None for the value, which won't
be a NULL value but it's the next best thing.
.. rubric:: Example
| >>> dbopen('demo','r')
>>> db.lookup(table='arrival')
>>> db.record = 0
>>> pick = Dbrecord(db)
"""
if db:
if db.record == dbALL:
raise ValueError("Rec # is 'dbALL', for multiple records, use Dbview().")
self.Ptr = Dbptr(db)
sel | f.Table = db.query(dbTABLE_NAME)
self.PrimaryKey = db.query(dbPRIMARY_KEY)
self._fields_unsorted = db.query(dbTABLE_FIELDS)
self._tables = db.query(dbVIEW_TABLES)
# NOTE: in some cases, the query will return a valid field name,
# but dbgetv can't extract a value. The try catches this error.
for field_name in self._fields_unsorted:
if db.query(dbVIEW_TABLE_COUNT) > 1:
if field_name in self.__dict__:
field_name = '.'.join(db.query(dbFIELD_BASE_TABLE),field_name)
try:
field_value = db.getv(field_name)[0]
except:
field_value = None
super(Dbrecord,self).__setitem__(field_name, field_value)
else:
self.Table = 'Empty'
self.PrimaryKey = ('Table',)
self._fields_unsorted = ()
def __repr__(self):
"""
Useful representation - shows the table and primary key of the record.
"""
start = "{0}('{1}' -> ".format(self.__class__.__name__, self.Table)
# Build up a list containing the fields of the primary key
# Annoyingly, times have a '::' between them, so deal with that...
mids = []
for k in self.PrimaryKey:
if '::' in k:
keyf = '::'.join([str(self.__dict__[_k]) for _k in k.split('::')])
else:
keyf = str(self.__dict__[k])
mids.append(keyf)
middle = ' '.join(mids)
end = ")"
return start+middle+end
def __str__(self):
"""
Prints out record content as a string.
SHOULD be the same as if you cat'ted a line from the table file
(w/o the extra whitespace)
"""
fields = [str(self.__dict__[f]) for f in self._fields_unsorted]
return ' '.join(fields)
class DbrecordList(list):
"""
A list-like container of Dbrecord objects.
A list that accepts a Dbptr as a constructor argument, calls Dbrecord for
every record the pointer references, and adds it to the list. Index number
corresponds to record number for that view.
.. rubric:: Example
>>> db = dbopen('demo','r')
>>> db.lookup(table='site')
>>> dblist = DbrecordList(db)
>>> db.nrecs() == len(dblist)
True
"""
def __init__(self, dbv=None):
"""
Creates a list of Dbrecords from a pointer
:type dbv: antelope.datascope.Dbptr
:param dbv: Open pointer to an Antelope database view or table
"""
super(DbrecordList,self).__init__()
if isinstance(dbv, Dbptr):
db = Dbptr(dbv)
self.extend([Dbrecord(db) for db.record in range(db.nrecs())])
# otherwise returns empty list
# Convenience functions
def col(self, field):
"""A column of the same field from each Dbrecord"""
return [dbr[field] for dbr in self if field in dbr.Fields ]
def acol(self, field):
"""A numpy array of the same field from each Dbrecord"""
return array(self.col(field))
|
interhui/py-sys | py_sys/execute.py | Python | apache-2.0 | 477 | 0.012579 | # | coding=utf-8
import os
def run(cmd, decoding=None, clean=False):
    """Run *cmd* through the shell and return its output lines.

    decoding -- optional codec name used to decode each output line
    clean    -- strip surrounding whitespace and drop empty lines
    Raises ValueError when *cmd* is empty.
    """
    if not cmd:
        raise ValueError('Command is Empty')
    lines = []
    for raw_line in os.popen(cmd).readlines():
        if decoding:
            raw_line = raw_line.decode(decoding)
        if clean:
            raw_line = raw_line.strip()
        if raw_line:
            lines.append(raw_line)
    return lines
| |
mikehulluk/morphforge | src/morphforgecontrib/fake_namespaces/postsynaptictypes.py | Python | bsd-2-clause | 1,976 | 0.002024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYR | IGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING | , BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforgecontrib.simulation.synapse_templates.exponential_form.expsyn.core import PostSynapticMech_ExpSyn_Base as ExpSynTemplateType
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2syn.core import PostSynapticMech_Exp2Syn_Base as Exp2SynTemplateType
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2synnmda.core import PostSynapticMech_Exp2SynNMDA_Base as Exp2NMDASynTemplateType
|
qpxu007/Flask-AppBuilder | flask_appbuilder/security/forms.py | Python | bsd-3-clause | 3,248 | 0.004926 | from wtforms import StringField, BooleanField, PasswordField
from flask_wtf.recaptcha import RecaptchaField
from flask_babelpkg import lazy_gettext
from wtforms.validators import DataRequired, EqualTo, Email
from ..fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from ..forms import DynamicForm
class LoginForm_oid(DynamicForm):
    """OpenID login form: provider URL, optional username, remember-me flag."""
    openid = StringField(lazy_gettext('OpenID'), validators=[DataRequired()])
    username = StringField(lazy_gettext('User Name'))
    remember_me = BooleanField(lazy_gettext('Remember me'), default=False)
class LoginForm_db(DynamicForm):
    """Database-auth login form: username and password, both required."""
    username = StringField(lazy_gettext('User Name'), validators=[DataRequired()])
    password = PasswordField(lazy_gettext('Password'), validators=[DataRequired()])
class ResetPasswordForm(DynamicForm):
    """Password-reset form: new password plus a matching confirmation."""
    password = PasswordField(lazy_gettext('Password'),
                             description=lazy_gettext(
                                 'Please use a good password policy, this application does not check this for you'),
                             validators=[DataRequired()],
                             widget=BS3PasswordFieldWidget())
    conf_password = PasswordField(lazy_gettext('Confirm Password'),
                                  description=lazy_gettext('Please rewrite the password to confirm'),
                                  validators=[EqualTo('password', message=lazy_gettext('Passwords must match'))],
                                  widget=BS3PasswordFieldWidget())
class RegisterUserDBForm(DynamicForm):
    """Self-registration form for database auth: identity fields, password
    with confirmation, and a reCAPTCHA challenge."""
    username = StringField(lazy_gettext('User Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    first_name = StringField(lazy_gettext('First Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    last_name = StringField(lazy_gettext('Last Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Email()], widget=BS3TextFieldWidget())
    password = PasswordField(lazy_gettext('Password'),
                             description=lazy_gettext(
                                 'Please use a good password policy, this application does not check this for you'),
                             validators=[DataRequired()],
                             widget=BS3PasswordFieldWidget())
    conf_password = PasswordField(lazy_gettext('Confirm Password'),
                                  description=lazy_gettext('Please rewrite the password to confirm'),
                                  validators=[EqualTo('password', message=lazy_gettext('Passwords must match'))],
                                  widget=BS3PasswordFieldWidget())
    recaptcha = RecaptchaField()
class RegisterUserOIDForm(DynamicForm):
    """Self-registration form for OpenID auth: identity fields and a
    reCAPTCHA challenge (no password -- the provider authenticates)."""
    username = StringField(lazy_gettext('User Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    first_name = StringField(lazy_gettext('First Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    last_name = StringField(lazy_gettext('Last Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Email()], widget=BS3TextFieldWidget())
    recaptcha = RecaptchaField()
|
sniemi/SamPy | sandbox/src1/examples/mathtext_examples.py | Python | bsd-2-clause | 2,927 | 0.0041 | #!/usr/bin/env python
import os, sys, re
import gc
stests = [
r'Kerning: AVA $AVA$',
r'\$100.00 $\alpha \_$',
r'$\frac{\$100.00}{y}$',
r'$x y$',
r'$x+y\ x=y\ x<y\ x:y\ x,y\ x@y$',
r'$100\%y\ x*y\ x/y x\$y$',
r'$x\leftarrow y\ x\forall y\ x-y$',
r'$x \sf x \bf x {\cal X} \rm x$',
r'$x\ x\,x\;x\quad x\qquad x\!x\hspace{ 0.5 }y$',
r'$\{ \rm braces \}$',
r'$\left[\left\lfloor\frac{5}{\frac{\left(3\right)}{4}} y\right)\right]$',
r'$\left(x\right)$',
r'$\sin(x)$',
r'$x_2$',
r'$x^2$',
r'$x^2_y$',
r'$x_y^2$',
r'$\prod_{i=\alpha_{i+1}}^\infty$',
r'$x = \frac{x+\frac{5}{2}}{\frac{y+3}{8}}$',
r'$dz/dt = \gamma x^2 + {\rm sin}(2\pi y+\phi)$',
r'Foo: $\alpha_{i+1}^j = {\rm sin}(2\pi f_j t_i) e^{-5 t_i/\tau}$',
r'$\mathcal{R}\prod_{i=\alpha_{i+1}}^\infty a_i \sin(2 \pi f x_i)$',
# r'$\bigodot \bigoplus {\sf R} a_i{\rm sin}(2 \pi f x_i)$',
r'Variable $i$ is good',
r'$\Delta_i^j$',
r'$\Delta^j_{i+1}$',
r'$\ddot{o}\acute{e}\grave{e}\hat{O}\breve{\imath}\tilde{n}\ | vec{q}$',
r'$_i$',
r"$\arccos((x^i))$",
r"$\gamma = \frac{x=\frac{6}{8}}{y} \delta$",
r'$\limsup_{x\to\infty}$',
r'$\oint^\infty_0$',
r"$f^'$" | ,
r'$\frac{x_2888}{y}$',
r"$\sqrt[3]{\frac{X_2}{Y}}=5$",
r"$\sqrt[5x\pi]{\prod^\frac{x}{2\pi^2}_\infty}$",
r"$\sqrt[3]{x}=5$",
r'$\frac{X}{\frac{X}{Y}}$',
# From UTR #25
r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]$",
r'$\mathcal{H} = \int d \tau \left(\epsilon E^2 + \mu H^2\right)$',
r'$\widehat{abc}\widetilde{def}$',
r'$\Gamma \Delta \Theta \Lambda \Xi \Pi \Sigma \Upsilon \Phi \Psi \Omega$',
r'$\alpha \beta \gamma \delta \epsilon \zeta \eta \theta \iota \lambda \mu \nu \xi \pi \kappa \rho \sigma \tau \upsilon \phi \chi \psi$',
ur'Generic symbol: $\u23ce \mathrm{\ue0f2 \U0001D538}$'
]
from pylab import *
def doall():
    """Render every string in ``stests`` with mathtext on one figure,
    one row per expression, and save it as 'mathtext_examples'."""
    tests = stests
    # One unit of figure height per test string, plus margin.
    figure(figsize=(8, (len(tests) * 1) + 2))
    plot([0, 0], 'r')
    grid(False)
    axis([0, 3, -len(tests), 0])
    yticks(arange(len(tests)) * -1)
    for i, s in enumerate(tests):
        print (i, s)
        # Rows are stacked downwards: test i sits at y = -i.
        text(0.1, -i, s, fontsize=20)
    savefig('mathtext_examples')
    #close('all')
    show()
if '--latex' in sys.argv:
    # Write the same test strings into a standalone LaTeX document and
    # compile it, so real TeX output can be compared with mathtext's.
    fd = open("mathtext_examples.ltx", "w")
    fd.write("\\documentclass{article}\n")
    fd.write("\\begin{document}\n")
    fd.write("\\begin{enumerate}\n")
    for i, s in enumerate(stests):
        # Promote inline math $...$ to display math $$...$$ (skipping
        # escaped \$ literals).
        s = re.sub(r"(?<!\\)\$", "$$", s)
        fd.write("\\item %s\n" % s)
    fd.write("\\end{enumerate}\n")
    fd.write("\\end{document}\n")
    fd.close()
    os.system("pdflatex mathtext_examples.ltx")
else:
    doall()
|
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/utils/functional.py | Python | mit | 16,501 | 0.000788 | import copy
import operator
import sys
import warnings
from functools import wraps
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import copyreg
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
    """Pre-bind positional and keyword arguments to *_curried_func*.

    Returns a plain function (not a functools.partial), so the result
    also binds as a method when assigned on a class.  Keyword arguments
    supplied at call time override the pre-bound ones.
    """
    def _curried(*more_args, **more_kwargs):
        merged_kwargs = dict(kwargs, **more_kwargs)
        return _curried_func(*(args + more_args), **merged_kwargs)
    return _curried
def memoize(func, cache, num_args):
    """
    Wrap *func* so results are stored in the mapping *cache*, keyed by
    the first *num_args* positional arguments (which must therefore be
    hashable).  Deprecated in favour of django.utils.lru_cache.
    """
    warnings.warn("memoize wrapper is deprecated and will be removed in "
                  "Django 1.9. Use django.utils.lru_cache instead.",
                  RemovedInDjango19Warning, stacklevel=2)

    @wraps(func)
    def wrapper(*args):
        key = args[:num_args]
        try:
            return cache[key]
        except KeyError:
            pass
        result = func(*args)
        cache[key] = result
        return result
    return wrapper
class cached_property(object):
    """
    Non-data descriptor that turns a single-argument method into a
    property computed once per instance: the first access stores the
    result in the instance __dict__, which then shadows the descriptor.

    The optional ``name`` argument caches under a different attribute,
    e.g. ``url = cached_property(get_absolute_url, name='url')``.
    """
    def __init__(self, func, name=None):
        self.func = func
        self.__doc__ = getattr(func, '__doc__')
        self.name = name or func.__name__

    def __get__(self, instance, type=None):
        if instance is None:
            # Class-level access returns the descriptor itself.
            return self
        value = self.func(instance)
        instance.__dict__[self.name] = value
        return value
class Promise(object):
    """
    Base class for the proxy class created in the closure of the lazy
    function.  It carries no behaviour of its own; it exists so lazily
    evaluated values can be recognized with ``isinstance(obj, Promise)``.
    """
    pass
def lazy(func, *resultclasses):
    """
    Turns any callable into a lazy evaluated callable. You need to give result
    classes or types -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """
    @total_ordering
    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        # Class-level flag; the magic methods of the result classes are
        # copied onto this class only once, on first instantiation.
        __prepared = False

        def __init__(self, args, kw):
            self.__args = args
            self.__kw = kw
            if not self.__prepared:
                self.__prepare_class__()
            self.__prepared = True

        def __reduce__(self):
            # Pickle as a call to _lazy_proxy_unpickle so the proxy is
            # rebuilt (not evaluated) on load.
            return (
                _lazy_proxy_unpickle,
                (func, self.__args, self.__kw) + resultclasses
            )

        @classmethod
        def __prepare_class__(cls):
            for resultclass in resultclasses:
                for type_ in resultclass.mro():
                    for method_name in type_.__dict__.keys():
                        # All __promise__ return the same wrapper method, they
                        # look up the correct implementation when called.
                        if hasattr(cls, method_name):
                            continue
                        meth = cls.__promise__(method_name)
                        setattr(cls, method_name, meth)
            cls._delegate_bytes = bytes in resultclasses
            cls._delegate_text = six.text_type in resultclasses
            assert not (cls._delegate_bytes and cls._delegate_text), (
                "Cannot call lazy() with both bytes and text return types.")
            # Wire up the str/bytes conversion dunders appropriate to the
            # declared result type and the Python major version.
            if cls._delegate_text:
                if six.PY3:
                    cls.__str__ = cls.__text_cast
                else:
                    cls.__unicode__ = cls.__text_cast
                    cls.__str__ = cls.__bytes_cast_encoded
            elif cls._delegate_bytes:
                if six.PY3:
                    cls.__bytes__ = cls.__bytes_cast
                else:
                    cls.__str__ = cls.__bytes_cast

        @classmethod
        def __promise__(cls, method_name):
            # Builds a wrapper around some magic method
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = func(*self.__args, **self.__kw)
                return getattr(res, method_name)(*args, **kw)
            return __wrapper__

        def __text_cast(self):
            return func(*self.__args, **self.__kw)

        def __bytes_cast(self):
            return bytes(func(*self.__args, **self.__kw))

        def __bytes_cast_encoded(self):
            return func(*self.__args, **self.__kw).encode('utf-8')

        def __cast(self):
            # Force evaluation, coercing to the declared result type.
            if self._delegate_bytes:
                return self.__bytes_cast()
            elif self._delegate_text:
                return self.__text_cast()
            else:
                return func(*self.__args, **self.__kw)

        def __ne__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() != other

        def __eq__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() == other

        def __lt__(self, other):
            # total_ordering derives the remaining comparisons from this
            # and __eq__.
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() < other

        def __hash__(self):
            return hash(self.__cast())

        def __mod__(self, rhs):
            if self._delegate_bytes and six.PY2:
                return bytes(self) % rhs
            elif self._delegate_text:
                return six.text_type(self) % rhs
            return self.__cast() % rhs

        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self

    @wraps(func)
    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)
    return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    """Rebuild a lazy proxy during unpickling (see __proxy__.__reduce__)."""
    rebuilt = lazy(func, *resultclasses)
    return rebuilt(*args, **kwargs)
def allow_lazy(func, *resultclasses):
    """
    Decorator that lets *func* accept one or more lazy arguments.  When
    no argument is a Promise the function runs immediately; otherwise a
    __proxy__ is returned that evaluates the function when needed.
    """
    lazy_func = lazy(func, *resultclasses)

    @wraps(func)
    def wrapper(*args, **kwargs):
        has_lazy_arg = any(
            isinstance(arg, Promise)
            for arg in list(args) + list(six.itervalues(kwargs)))
        if has_lazy_arg:
            return lazy_func(*args, **kwargs)
        return func(*args, **kwargs)
    return wrapper
# Sentinel marking a LazyObject whose wrapped instance is not yet created.
empty = object()
def new_method_proxy(func):
    """Return a method that materialises the lazily wrapped object on first
    use and then forwards the call to it."""
    def inner(self, *args):
        wrapped = self._wrapped
        if wrapped is empty:
            self._setup()
            wrapped = self._wrapped
        return func(wrapped, *args)
    return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use S |
silly-wacky-3-town-toon/SOURCE-COD | toontown/pgui/DirectEntryScroll.py | Python | apache-2.0 | 4,075 | 0.011779 | __all__ = ['DirectEntryScroll']
from pandac.PandaModules import *
import DirectGuiGlobals as DGG
from DirectScrolledFrame import *
from DirectFrame import *
from DirectEntry import *
class DirectEntryScroll(DirectFrame):
    """A clipped frame hosting a DirectEntry that scrolls horizontally so
    the text cursor stays visible while typing.

    NOTE(review): do not scale the entry itself; size it via text_scale /
    frameSize (or scale this widget's parent), per the original comments.
    """
    def __init__(self, entry, parent = None, **kw):
        optiondefs = (
            ('pgFunc', PGVirtualFrame, None),
            ('relief', None, None),
            ('clipSize', (-1, 1, -1, 1), self.setClipSize),
        )
        self.defineoptions(kw, optiondefs)
        DirectFrame.__init__(self, parent, **kw)
        self.canvas = None
        # Visible and clip extents along X; kept in sync by setClipSize().
        self.visXMin = 0.0
        self.visXMax = 0.0
        self.clipXMin = 0.0
        self.clipXMax = 0.0
        self.initialiseoptions(DirectEntryScroll)
        # don't set a scale on the entry
        # instead make it the correct size, use something like:
        # text_scale = 0.035,
        # frameSize = (-0.006, 3.2, -0.015, 0.036),
        # if you need to scale the entry scale it's parent instead
        self.entry = entry
        self.canvas = NodePath(self.guiItem.getCanvasNode())
        self.entry.reparentTo(self.canvas)
        self.canvas.setPos(0, 0, 0)
        self.entry.bind(DGG.CURSORMOVE, self.cursorMove)
        # Give the canvas infinite bounds so it is never culled away.
        self.canvas.node().setBounds(OmniBoundingVolume())
        self.canvas.node().setFinal(1)
        self.resetCanvas()
    def cursorMove(self, cursorX, cursorY):
        """CURSORMOVE callback: re-center when the cursor drifts more than
        half the visible width away from the middle of the clip region."""
        # Recompute the cursor X in canvas units; ignore the event args.
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        clipExtent = self.clipXMax - self.clipXMin
        # (dead locals entryExtent/entryWiggle from the original removed)
        if abs(distanceToCenter) > (clipExtent * 0.5):
            self.moveToCenterCursor()
    def moveToCenterCursor(self):
        """Shift the canvas so the cursor lands mid-clip, clamped to the
        entry's scrollable range."""
        cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
        canvasX = self.canvas.getX()
        visXMin = self.clipXMin - canvasX
        visXMax = self.clipXMax - canvasX
        visXCenter = (visXMin + visXMax) * 0.5
        distanceToCenter = visXCenter - cursorX
        newX = canvasX + distanceToCenter
        clipExtent = self.clipXMax - self.clipXMin
        entryExtent = self.entry['text_scale'][0] * self.entry['width']
        entryWiggle = entryExtent - clipExtent
        if self.entry.guiItem.getCursorPosition() <= 0: #deals with the cursor jump bug
            newX = 0.0
        elif newX > 0.0:
            newX = 0.0
        elif newX < (-entryWiggle):
            newX = -entryWiggle
        self.canvas.setX(newX)
    def destroy(self):
        # Destroy children of the canvas before tearing down the frame.
        for child in self.canvas.getChildren():
            childGui = self.guiDict.get(child.getName())
            if childGui:
                childGui.destroy()
            else:
                # Fall back to the trailing name component used by DirectGui.
                parts = child.getName().split('-')
                simpleChildGui = self.guiDict.get(parts[-1])
                if simpleChildGui:
                    simpleChildGui.destroy()
        self.entry.destroy()
        self.entry = None
        DirectFrame.destroy(self)
    def getCanvas(self):
        """Return the NodePath the entry is parented to."""
        return self.canvas
    def setClipSize(self):
        # Option handler: push the new clip frame to the PGVirtualFrame and
        # cache its X extents.
        self.guiItem.setClipFrame(self['clipSize'])
        self.clipXMin = self['clipSize'][0]
        self.clipXMax = self['clipSize'][1]
        self.visXMin = self.clipXMin
        self.visXMax = self.clipXMax
        if self.canvas:
            self.resetCanvas()
    def resetCanvas(self):
        """Scroll the canvas back to the origin."""
        self.canvas.setPos(0, 0, 0)
|
# Packaging script for `theape` (The All-Purpose Experimenter).
try:
    from setuptools import setup, find_packages
except ImportError:
    # Bootstrap setuptools if it is not installed yet.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

# The long description comes straight from the project readme.
with open('readme.rst') as reader:
    long_description = reader.read()

setup(name='theape',
      long_description=long_description,
      version= '2014.11.10',
      description="The All-Purpose Experimenter.",
      author="russell",
      platforms=['linux'],
      url = '',
      author_email="necromuralist@gmail.com",
      license = "MIT",
      install_requires = 'pudb numpy paramiko configobj docopt'.split(),
      packages = find_packages(),
      include_package_data = True,
      package_data = {"theape":["*.txt", "*.rst", "*.ini"]},
      entry_points = """
	  [console_scripts]
      theape=theape.main:main

      [theape.subcommands]
      subcommands=theape.infrastructure.arguments

      [theape.plugins]
      plugins = theape.plugins
      """
      )
# an example last line would be cpm= cpm.main: main
# If you want to require other packages add (to setup parameters):
# install_requires = [<package>],
#version=datetime.today().strftime("%Y.%m.%d"),
# if you have an egg somewhere other than PyPi that needs to be installed as a dependency, point to a page where you can download it:
# dependency_links = ["http://<url>"]
|
victormatheus/simulated_annealing | tests/test_random.py | Python | gpl-3.0 | 694 | 0.024496 | #!/usr/bin/env python
import sys
sys.path.append("..")
import sys
import getopt
from time import time
from simulannealing import SimulatedAnnealing as SimulAnn
import graph as Graph
from graph_utils import GraphGen
# Tamanho previsto para as instancias aleatorias.
instances = [10, 50, 100, 200, 500, 700, 1000, 1500]
for i in instances:
print '##### Tamanho da Instancia: ', i
tempo = time()
graph, terminals | = GraphGen.generate(nodes_number=i, steiner=True)
print 'Tempo de geracao: ', time()-tempo
print 'Numero de termin | ais: ', len(terminals)
tempo = time()
steiner = SimulAnn(graph, terminals).get_min_steiner_tree()
print 'Tempo de execucao da heuristica: ', time()-tempo
|
sdestercke/classifip | classifip/models/knn.py | Python | gpl-2.0 | 5,129 | 0.015988 | from ..dataset.arff import ArffFile
from scipy.spatial import kdtree, distance
from ..representations.intervalsProbability import IntervalsProbability
import numpy as np
from math import exp
class IPKNN(object):
    """IPKNN implements a K-nearest neighbour method using lower previsions.
    If data are all precise, it returns
    :class:`~classifip.representations.intervalsProbability.IntervalsProbability`
    equivalent to a linear vacuous model. The method is based on [#destercke2012]_

    :param tree: kdtree structure storing learning data set instances
    :type tree: scipy.spatial.kdtree
    :param truelabels: store the true labels of learning instances
    :type truelabels: list of labels
    :param beta: exponent parameter used in discounting rate
    :type beta: positive float
    :param epsilon: base discounting rate
    :type epsilon: float between 0 and 1
    :param av_dist: average distances of members of a given class
    :type av_dist: float
    :param classes: list of class names

    .. note::
        * Assumes that the class attribute is the last one in samples in the learning method
        * If too many data, average distance approximated by sampling
    .. todo::
        * Make it possible for the class to be in any column (retrieve index)
    """
    def __init__(self):
        """Build an empty IPKNN structure."""
        self.tree = None
        self.truelabels = []
        self.beta = 1.5
        self.epsilon = 0.99
        self.classes = []
        self.av_dist = []
    def learn(self, learndataset):
        """learn the KNN structure required to evaluate new instances

        :param learndataset: learning instances
        :type learndataset: :class:`~classifip.dataset.arff.ArffFile`
        """
        self.__init__()
        self.classes = learndataset.attribute_data['class'][:]
        # Initialize average intra-class distance for every possible class.
        for i in learndataset.attribute_data['class']:
            class_set = learndataset.select_class([i])
            values = [row[0:len(row)-1] for row in class_set.data]
            if len(values) > 1000:
                # Too many instances: approximate with a 1000-sample subset.
                valred = np.random.permutation(values)[0:1000]
                class_distances = distance.cdist(valred, valred)
            else:
                class_distances = distance.cdist(values, values)
            # Mean over off-diagonal pairs (the diagonal contributes zero).
            average = class_distances.sum()/(len(class_distances)**2
                                             - len(class_distances))
            self.av_dist.append(average)
        # Train the KD-tree on all features (class label is the last column).
        learndata = [row[0:len(row)-1] for row in learndataset.data]
        self.truelabels = [row[-1] for row in learndataset.data]
        self.tree = kdtree.KDTree(learndata)
    def evaluate(self, testdataset, knn_beta=1.5, knn_epsilon=0.99, knn_nb_neigh=3):
        """evaluate the instances and return a list of probability intervals

        :param testdataset: list of input features of instances to evaluate
        :type dataset: list
        :param knn_beta: value of beta parameter used in evaluation
        :type knn_beta: float
        :param knn_epsilon: value of base discounting rate to use
        :type knn_epsilon: float
        :param knn_nb_neigh: values of number f neighbours to use
        :type knn_nb_neigh: list of int
        :returns: for each value of knn_nb_neigh, a set of probability intervals
        :rtype: lists of :class:`~classifip.representations.intervalsProbability.IntervalsProbability`
        """
        self.beta = knn_beta
        self.epsilon = knn_epsilon
        answers = []
        for i in testdataset:
            resulting_int = np.zeros((2, len(self.classes)))
            query = self.tree.query(i, knn_nb_neigh)
            # ensure query returns list of array (k == 1 yields scalars)
            if query[0].__class__.__name__ != 'ndarray':
                query = list(query)
                query[0] = [query[0]]
                query[1] = [query[1]]
            for k in range(len(query[0])):
                # retrieve class index of kth neighbour
                neigh_class = self.classes.index(self.truelabels[query[1][k]])
                # compute the linear vacuous model of this neighbour
                # the higher discount, the most original info is kept
                # discount~reliability of the information between [0,1]
                expon = -((query[0][k])**(self.beta))/self.av_dist[neigh_class]
                discount = (self.epsilon)*(exp(expon))
                up = np.zeros(len(self.classes))
                up.fill(1-discount)
                up[neigh_class] = 1
                down = np.zeros(len(self.classes))
                down[neigh_class] = discount
                resulting_int[0] += up
                resulting_int[1] += down
            # make the average of all k obtained models
            resulting_int[0] = resulting_int[0]/knn_nb_neigh
            resulting_int[1] = resulting_int[1]/knn_nb_neigh
            result = IntervalsProbability(resulting_int)
            answers.append(result)
        return answers
|
BrainTech/openbci | obci/analysis/balance/raw_analysis.py | Python | gpl-3.0 | 8,820 | 0.031633 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
def test_signal(T=100, fs=1):
    """Constant unit signal sampled at fs over [0, T): row 0 is time,
    row 1 is all ones."""
    times = np.arange(0, T, 1 / fs)
    ones = np.ones(times.shape)
    return np.vstack((times, ones))
def test_signal1(T=16.0, fs=10.0):
    """Identity ramp (y == x) spanning [-T/2, T/2) sampled at fs."""
    axis = np.arange(-T / 2, T / 2, 1 / fs)
    return np.vstack((axis, axis))
def plot_graph(ax, title, x, y, xlabel, ylabel):
    """Render one line plot on *ax*, then apply its title and axis labels."""
    ax.set_title(title)
    ax.plot(x, y)
    for apply_label, text in ((ax.set_xlabel, xlabel), (ax.set_ylabel, ylabel)):
        apply_label(text)
def plot_COP(signal, fs):
    """ Plots real values of COP positions: COPx(time), COPy(time) and COPy(COPx)
    Input:
        signal -- 2 dim array -- x,y values
        fs     -- float -- sampling frequency """
    time = np.arange(0, signal.shape[1]*1/fs, 1/fs)
    f = plt.figure(figsize=(16, 12))
    ax_x = f.add_subplot(221)
    plot_graph(ax_x, 'COPx position', time, signal[0],
               'time [s]', 'position COPx [cm]')
    # ax_x.set_xlim(0,30)
    ax_y = f.add_subplot(223)
    # label was garbled in the original ('position COPy | [cm]')
    plot_graph(ax_y, 'COPy position', time, signal[1],
               'time [s]', 'position COPy [cm]')
    # ax_y.set_xlim(0,30)
    ax_xy = f.add_subplot(122)
    # The x/y scatter panel only shows the first 30 seconds of data.
    plot_graph(ax_xy, 'COP position', signal[0][:int(fs*30)],
               signal[1][:int(fs*30)], 'position COPx [cm]',
               'position COPy [cm]')
    f.canvas.draw()
    #f.savefig(str(file_name)+'.png')
def calculate_distance(position_1, position_2):
    """ Returns distance between two points or 1-dim and 2-dim vectors
    input:
        position_1 -- number or 2-element vector
        position_2 -- number or 2-element vector
        position_1 and position_2 must have the same dimension
    """
    distance = position_2 - position_1
    try:
        return (distance[0]**2 + distance[1]**2)**0.5
    except (IndexError, TypeError):
        # Scalar difference: numpy scalars raise IndexError on indexing,
        # but plain Python numbers raise TypeError — handle both.
        return np.abs(distance)
def max_sway_AP_ML(signal):
    """Maximal sway relative to the mean COP.
    Input:
        signal -- 2-dim array, shape (channel, samples); row 0 is x (ML),
                  row 1 is y (AP)
    Output:
        (max resultant sway, max AP sway, max ML sway) -- floats
    """
    centered_ml = signal[0] - np.mean(signal[0])
    centered_ap = signal[1] - np.mean(signal[1])
    radial = np.sqrt(centered_ml**2 + centered_ap**2)
    return max(radial), max(np.abs(centered_ap)), max(np.abs(centered_ml))
def mean_COP_sway_AP_ML(signal):
    """Mean sway relative to the mean COP.
    Input:
        signal -- 2-dim array, shape (channel, samples); row 0 is x (ML),
                  row 1 is y (AP)
    Output:
        (mean resultant sway, mean AP sway, mean ML sway) -- floats
    """
    centered_ml = signal[0] - np.mean(signal[0])
    centered_ap = signal[1] - np.mean(signal[1])
    radial = np.sqrt(centered_ml**2 + centered_ap**2)
    return np.mean(radial), np.mean(np.abs(centered_ap)), np.mean(np.abs(centered_ml))
def COP_path(signal):
    """ Returns total length of the COP path
    Input:
        signal -- 2-dim array with shape (channel, samples)
                  (index 0 - channel x, index 1 - channel y)
    Output:
        cop   -- float -- total Euclidean path length
        cop_x -- float -- path length projected on x
        cop_y -- float -- path length projected on y
    """
    # Vectorized over the per-sample steps; the original looped in Python
    # with py2-only xrange().
    steps = np.diff(signal, axis=1)
    cop = np.sum(np.sqrt(steps[0]**2 + steps[1]**2))
    cop_x = np.sum(np.abs(steps[0]))
    cop_y = np.sum(np.abs(steps[1]))
    return cop, cop_x, cop_y
def RMS_AP_ML(signal):
    """Root Mean Square (RMS) sway: resultant, anterio-posterior (AP)
    and medio-lateral (ML)."""
    n_samples = signal.shape[1]
    dev_ml = signal[0] - np.mean(signal[0])
    dev_ap = signal[1] - np.mean(signal[1])
    radial = np.sqrt(dev_ml**2 + dev_ap**2)
    rms = np.sqrt(1./n_samples*np.sum(radial**2))
    rms_ap = np.sqrt(1./n_samples*np.sum(dev_ap**2))
    rms_ml = np.sqrt(1./n_samples*np.sum(dev_ml**2))
    return rms, rms_ap, rms_ml
def confidence_ellipse_area(signal):
    """Area of the 95% confidence ellipse of the COP scatter."""
    n_samples = signal.shape[1]
    s_ap = np.std(signal[1])
    s_ml = np.std(signal[0])
    # Covariance between the centered ML and AP channels.
    s_cross = 1./n_samples*np.sum((signal[0] - np.mean(signal[0]))
                                  * (signal[1] - np.mean(signal[1])))
    return 2*np.pi*3.0*np.sqrt(s_ap**2*s_ml**2 - s_cross**2)
def mean_velocity(signal, fs):
    """Average COP velocity: resultant, AP and ML components."""
    duration = signal.shape[1]/fs
    total_path, path_ml, path_ap = COP_path(signal)
    return total_path/duration, path_ap/duration, path_ml/duration
# def reaction_time():
# """ Returns reaction time (2 standard deviations from baseline rest period till start signal)"""
# return rt
# def triangles_area():
# """ Returns sway area (AREA-SW), which estimates the area enclosed by the COP path per unit of time.
# This measure is approximated by summing the area of the triangles formed by two consecutive points
# on the COP path and the mean COP """
# return area_sw
# def mean_angle():
# """ Returns mean sway angle """
# return angle
def get_percentages_being(signal, fs, grid=0.1, plot=True):
    """Return how long the subject spent in each grid x grid field (% of time).
    Input:
        signal -- 2-dim array with shape (channel, samples)
                  (index 0 - samples from channel x, index 1 - channel y)
        grid   -- float -- side length of the square fields (default 0.1)
        plot   -- bool  -- also draw the histogram (default True)
    Output:
        percentages_being -- 2-dim array (shape (xedges-1, yedges-1)) with
            the share of time spent in each field
        xedges -- 1-dim array of x grid-edge values
        yedges -- 1-dim array of y grid-edge values
    """
    x_lo, x_hi = signal[0].min(), signal[0].max()
    y_lo, y_hi = signal[1].min(), signal[1].max()
    grid_x, grid_y = get_grid(grid, x_lo, x_hi, y_lo, y_hi)
    percentages_being, xedges, yedges = np.histogram2d(
        signal[0], signal[1],
        bins=[grid_x.shape[0], grid_y.shape[0]],
        range=[[x_lo, x_hi], [y_lo, y_hi]])
    # counts -> seconds -> share of total duration -> percent
    percentages_being *= 1/fs
    percentages_being /= (signal.shape[1]*1/fs)
    percentages_being *= 100
    if plot:
        plot_percentages_being(grid, percentages_being, xedges, yedges, signal)
        plt.show()
    return percentages_being, xedges, yedges
def get_percentages_values(signal, fs, plot=True):
    """
    Returns percentages of being on each of four parts of the board.
    Input:
        signal -- 2-dim array with shape (channel, samples)
                  (index 0 - samples from channel x, index 1 - channel y)
        fs -- float -- sampling frequency
    Output:
        (top_right, top_left, bottom_right, bottom_left) -- floats
    """
    p, xedges, yedges = get_percentages_being(signal, fs, plot=plot)
    quadrants = {"tr": 0, "tl": 0, "br": 0, "bl": 0}
    # NOTE(review): j/i enumerate edges[1:-1] while indexing p[j, i] from
    # zero, exactly as the original did — verify the intended offset.
    for j, x in enumerate(xedges[1:-1]):
        for i, y in enumerate(yedges[1:-1]):
            if x > 0 and y > 0:
                quadrants["tr"] += p[j, i]
            elif x < 0 and y > 0:
                quadrants["tl"] += p[j, i]
            elif x < 0 and y < 0:
                quadrants["bl"] += p[j, i]
            elif x > 0 and y < 0:
                quadrants["br"] += p[j, i]
    return quadrants["tr"], quadrants["tl"], quadrants["br"], quadrants["bl"]
def get_grid(grid, x_min, x_max, y_min, y_max):
    """Return the x/y grid-edge subranges that cover the observed extents.
    The full board spans x in [-13, 13) and y in [-22.5, 22.5) cm."""
    full_x = np.arange(-13, 13, grid)
    full_y = np.arange(-22.5, 22.5, grid)
    lo_x, hi_x = np.searchsorted(full_x, x_min), np.searchsorted(full_x, x_max)
    lo_y, hi_y = np.searchsorted(full_y, y_min), np.searchsorted(full_y, y_max)
    # Pad by one cell on each side so the extremes fall strictly inside.
    return full_x[lo_x - 1:hi_x + 1], full_y[lo_y - 1:hi_y + 1]
def plot_percentages_being(grid, percentages_being, xedges, yedges, sig):
fig = plt.figure()
ax = fig.gca()
ax.set_title('histogram with percentagles\nbegining in field {}cm x {}cm [time %].'.format(grid, grid))
im = mpl.image.NonUniformImage(ax, interpolation='nearest')
xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
im.set_data(xcenters, ycen |
smmribeiro/intellij-community | python/helpers/typeshed/tests/mypy_test.py | Python | apache-2.0 | 12,194 | 0.002952 | #!/usr/bin/env python3
"""Test runner for typeshed.
Depends on mypy being installed.
Approach:
1. Parse sys.argv
2. Compute appropriate arguments for mypy
3. Stuff those arguments into sys.argv
4. Run mypy.main('')
5. Repeat steps 2-4 for other mypy runs (e.g. --py2)
"""
import argparse
import os
import re
import sys
import tempfile
from glob import glob
from pathlib import Path
from typing import Dict, NamedTuple
import tomli
# Command-line interface: path filter patterns, Python versions, target
# platform, and mypy strictness toggles for the typeshed test run.
parser = argparse.ArgumentParser(description="Test runner for typeshed. Patterns are unanchored regexps on the full path.")
parser.add_argument("-v", "--verbose", action="count", default=0, help="More output")
parser.add_argument("-n", "--dry-run", action="store_true", help="Don't actually run mypy")
parser.add_argument("-x", "--exclude", type=str, nargs="*", help="Exclude pattern")
parser.add_argument("-p", "--python-version", type=str, nargs="*", help="These versions only (major[.minor])")
parser.add_argument("--platform", help="Run mypy for a certain OS platform (defaults to sys.platform)")
parser.add_argument(
    "--warn-unused-ignores",
    action="store_true",
    help="Run mypy with --warn-unused-ignores "
    "(hint: only get rid of warnings that are "
    "unused for all platforms and Python versions)",
)
parser.add_argument("filter", type=str, nargs="*", help="Include pattern (default all)")
def log(args, *varargs):
    """Print *varargs* only when verbosity is at least 2."""
    if args.verbose < 2:
        return
    print(*varargs)
def match(fn, args):
    """Decide whether path *fn* is selected by args.filter / args.exclude.

    Exclude patterns veto first; then, if include patterns exist, one must
    match; with no include patterns everything not excluded passes."""
    if not args.filter and not args.exclude:
        log(args, fn, "accept by default")
        return True
    for pattern in args.exclude or ():
        if re.search(pattern, fn):
            log(args, fn, "excluded by pattern", pattern)
            return False
    for pattern in args.filter or ():
        if re.search(pattern, fn):
            log(args, fn, "accepted by pattern", pattern)
            return True
    if args.filter:
        log(args, fn, "rejected (no pattern matches)")
        return False
    log(args, fn, "accepted (no exclude pattern matches)")
    return True
_VERSION_LINE_RE = re.compile(r"^([a-zA-Z_][a-zA-Z0-9_.]*): ([23]\.\d{1,2})-([23]\.\d{1,2})?$")
def parse_versions(fname):
    """Parse a VERSIONS file into {module: ((min_maj, min_min), (max_maj, max_min))}."""
    result = {}
    with open(fname) as fh:
        for raw in fh:
            # Allow trailing comments and blank lines.
            line = raw.split("#")[0].strip()
            if not line:
                continue
            m = _VERSION_LINE_RE.match(line)
            assert m, "invalid VERSIONS line: " + line
            lower = parse_version(m.group(2))
            upper = parse_version(m.group(3)) if m.group(3) else (99, 99)
            result[m.group(1)] = lower, upper
    return result
_VERSION_RE = re.compile(r"^([23])\.(\d+)$")
def parse_version(v_str):
    """Convert a "X.Y" version string into the tuple (X, Y)."""
    m = _VERSION_RE.match(v_str)
    assert m, "invalid version: " + v_str
    major, minor = m.groups()
    return int(major), int(minor)
def is_supported(distribution, major):
    # Decide whether the stub distribution supports the given Python major
    # version, based on its METADATA.toml and directory layout.
    dist_path = Path("stubs", distribution)
    with open(dist_path / "METADATA.toml") as f:
        data = dict(tomli.loads(f.read()))
    if major == 2:
        # Python 2 is not supported by default.
        return bool(data.get("python2", False)) or (dist_path / "@python2").exists()
    # Python 3 is supported by default.
    return has_py3_stubs(dist_path)
# Keep this in sync with stubtest_third_party.py
def has_py3_stubs(dist: Path) -> bool:
    """Return True if *dist* ships at least one Python 3 stub file."""
    top_level = glob(f"{dist}/*.pyi")
    package_inits = glob(f"{dist}/[!@]*/__init__.pyi")
    return bool(top_level or package_inits)
def add_files(files, seen, root, name, args):
    """Add all files in package or module represented by 'name' located in 'root'."""
    full = os.path.join(root, name)
    mod, ext = os.path.splitext(name)
    if ext in [".pyi", ".py"]:
        # Single-file module: include it when it survives the CLI filters.
        if match(full, args):
            seen.add(mod)
            files.append(full)
    elif os.path.isfile(os.path.join(full, "__init__.pyi")) or os.path.isfile(os.path.join(full, "__init__.py")):
        # Package directory: walk it (sorted for deterministic order) and
        # pick up every .py/.pyi file that matches the filters.
        for r, ds, fs in os.walk(full):
            ds.sort()
            fs.sort()
            for f in fs:
                m, x = os.path.splitext(f)
                if x in [".pyi", ".py"]:
                    fn = os.path.join(r, f)
                    if match(fn, args):
                        seen.add(mod)
                        files.append(fn)
class MypyDistConf(NamedTuple):
    # Per-distribution mypy override: the module it applies to plus the raw
    # config key/value pairs copied from METADATA.toml (see comment below).
    module_name: str
    values: Dict
# The configuration section in the metadata file looks like the following, with multiple module sections possible
# [mypy-tests]
# [mypy-tests.yaml]
# module_name = "yaml"
# [mypy-tests.yaml.values]
# disallow_incomplete_defs = true
# disallow_untyped_defs = true
def add_configuration(configurations: list[MypyDistConf], distribution: str) -> None:
    # Extract the optional [mypy-tests] sections from the distribution's
    # METADATA.toml and append one MypyDistConf per module section.
    with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
        data = dict(tomli.loads(f.read()))
    mypy_tests_conf = data.get("mypy-tests")
    if not mypy_tests_conf:
        return
    assert isinstance(mypy_tests_conf, dict), "mypy-tests should be a section"
    for section_name, mypy_section in mypy_tests_conf.items():
        assert isinstance(mypy_section, dict), "{} should be a section".format(section_name)
        module_name = mypy_section.get("module_name")
        assert module_name is not None, "{} should have a module_name key".format(section_name)
        assert isinstance(module_name, str), "{} should be a key-value pair".format(section_name)
        values = mypy_section.get("values")
        assert values is not None, "{} should have a values section".format(section_name)
        assert isinstance(values, dict), "values should be a section"
        configurations.append(MypyDistConf(module_name, values.copy()))
def run_mypy(args, configurations, major, minor, files, *, custom_typeshed=False):
    """Run mypy over *files* for Python major.minor, using a temporary
    config file built from the per-distribution configurations.
    Returns mypy's exit code (0 on success)."""
    try:
        from mypy.main import main as mypy_main
    except ImportError:
        print("Cannot import mypy. Did you install it?")
        sys.exit(1)
    with tempfile.NamedTemporaryFile("w+") as temp:
        temp.write("[mypy]\n")
        for dist_conf in configurations:
            temp.write("[mypy-%s]\n" % dist_conf.module_name)
            for k, v in dist_conf.values.items():
                temp.write("{} = {}\n".format(k, v))
        temp.flush()
        flags = get_mypy_flags(args, major, minor, temp.name, custom_typeshed=custom_typeshed)
        # mypy_main reads its CLI from sys.argv; this line was garbled in
        # the original ("+ | flags").
        sys.argv = ["mypy"] + flags + files
        if args.verbose:
            print("running", " ".join(sys.argv))
        if not args.dry_run:
            try:
                mypy_main("", sys.stdout, sys.stderr)
            except SystemExit as err:
                return err.code
    return 0
def get_mypy_flags(args, major: int, minor: int, temp_name: str, *, custom_typeshed: bool = False) -> list[str]:
    """Assemble the mypy command-line flags for a single run."""
    flags = [
        "--python-version", "%d.%d" % (major, minor),
        "--config-file", temp_name,
        "--no-site-packages",
        "--show-traceback",
        "--no-implicit-optional",
        "--disallow-any-generics",
        "--warn-incomplete-stub",
        "--show-error-codes",
        "--no-error-summary",
    ]
    if custom_typeshed:
        # Setting custom typeshed dir prevents mypy from falling back to its
        # bundled typeshed in case of stub deletions.
        flags += ["--custom-typeshed-dir", os.path.dirname(os.path.dirname(__file__))]
    if args.warn_unused_ignores:
        flags.append("--warn-unused-ignores")
    if args.platform:
        flags += ["--platform", args.platform]
    return flags
def read_dependencies(distribution: str) -> list[str]:
    # Collect the "requires" entries from the distribution's METADATA.toml,
    # with their mandatory "types-" prefix stripped.
    with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
        data = dict(tomli.loads(f.read()))
    requires = data.get("requires", [])
    assert isinstance(requires, list)
    dependencies = []
    for dependency in requires:
        assert isinstance(dependency, str)
        assert dependency.startswith("types-")
        # len("types-") == 6
        dependencies.append(dependency[6:])
    return dependencies
def add_third_party_files(
distribution: str, major: int, files: list[str], args, configurations: list[MypyDistConf], seen_dists: set[str]
) -> None:
if dis |
lino-framework/book | lino_book/projects/20100206/models.py | Python | bsd-2-clause | 1,674 | 0.007766 | """
Allow empty non-nullable ForeignKey fields until save()
Django Ticket #12801 (http://code.djangoproject.com/ticket/12801)
Follow-up to Django Ticket #12708 (http://code.djangoproject.com/ticket/12708)
When a model has non-nullable fields (that don't have `null=True`), then Django
still accepts to instantiate objects of that model where these fields are empty.
An exception is raised only when you try to | save such an (invalid) instance.
Unfortunately this is not true for ForeignKey fields.
If you leave a non-nullable ForeignKey field empty, Django behaves strangely:
your instance is then like a time bomb, causing an exception when you only consult that field.
We create an Order without a Journal:
>>> o = Order()
>>> print o.date_field
None
>>> print o.char_field
<BLANKLINE>
>>> print o.int_field
None
>>> print o.decimal_field
None
No problem so far. And of course, if you try to save this, you'll get an exception:
>>> o.save()
Traceback (most recent call last):
...
IntegrityError: 20100206_order.date_field may not be NULL
But ForeignKey fields are different: you get an exception when you only look at them:
>>> print o.fk_field
Traceback (most recent call last):
...
DoesNotExist
This behaviour is not sane.
The line `print o.fk_field` should output `None`, like it does for other field types.
"""
from django.db import models
class Journal(models.Model):
    # Minimal target model for Order.fk_field; no fields of its own.
    pass
class Order(models.Model):
    # Non-nullable fields of several types, used by the doctest above to
    # show how empty values behave before save().
    date_field = models.DateField()
    char_field = models.CharField(max_length=20)
    int_field = models.IntegerField()
    # NOTE(review): declared as IntegerField despite the name; kept as-is
    # since the doctest does not exercise it.
    decimal_field = models.IntegerField()
    # Was `dd.ForeignKey`, but `dd` is never imported in this module and
    # would raise NameError at import time; only django's models is in scope.
    fk_field = models.ForeignKey(Journal)
|
niwinz/Green-Mine | src/greenmine/questions/urls.py | Python | bsd-3-clause | 564 | 0.007092 | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, include, url
from .views import *
# URL routes for the questions app; the delete route was garbled in the
# original ("/delet | e/$").
urlpatterns = patterns('',
    url(r'^$', QuestionsListView.as_view(), name='questions'),
    url(r'^create/$', QuestionsCreateView.as_view(), name='questions-create'),
    url(r'^(?P<qslug>[\w\d\-]+)/view/$', QuestionsView.as_view(), name='questions-view'),
    url(r'^(?P<qslug>[\w\d\-]+)/edit/$', QuestionsEditView.as_view(), name='questions-edit'),
    url(r'^(?P<qslug>[\w\d\-]+)/delete/$', QuestionsDeleteView.as_view(), name='questions-delete'),
)
|
sendgridlabs/ddbmock | tests/functional/pyramid/test_scan.py | Python | lgpl-3.0 | 4,332 | 0.001385 | import json
import unittest
# Fixture tables and items for the Scan tests.
TABLE_NAME1 = 'Table-1'
TABLE_RT = 45
TABLE_WT = 123
TABLE_NAME = 'Table-HR'
# NOTE(review): TABLE_RT/TABLE_WT are re-assigned with the same values;
# kept as-is from the original fixture definitions.
TABLE_RT = 45
TABLE_WT = 123
TABLE_RT2 = 10
TABLE_WT2 = 10
TABLE_HK_NAME = u'hash_key'
TABLE_HK_TYPE = u'N'
TABLE_RK_NAME = u'range_key'
TABLE_RK_TYPE = u'S'

HK_VALUE1 = u'123'
HK_VALUE2 = u'456'
HK_VALUE3 = u'789'
RK_VALUE1 = u'Waldo-1'
RK_VALUE2 = u'Waldo-2'
RK_VALUE3 = u'Waldo-3'
RK_VALUE4 = u'Waldo-4'
RK_VALUE5 = u'Waldo-5'

ITEM1 = {
    TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE1},
    TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE1},
    u'relevant_data': {u'S': u'tata'},
}
ITEM2 = {
    TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE1},
    TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE2},
    u'relevant_data': {u'S': u'tete'},
}
# ITEM3/ITEM4 were garbled in the original ("{TABL | E_HK_TYPE", "RK_VALUE4 | }").
ITEM3 = {
    TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE2},
    TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE3},
    u'relevant_data': {u'S': u'titi'},
}
ITEM4 = {
    TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE3},
    TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE4},
    u'relevant_data': {u'S': u'toto'},
}
ITEM5 = {
    TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE3},
    TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE5},
    u'relevant_data': {u'S': u'tutu'},
}

HEADERS = {
    'x-amz-target': 'dynamodb_20111205.Scan',
    'content-type': 'application/x-amz-json-1.0',
}
# Goal here is not to test the full API, this is done by the Boto tests
class TestScan(unittest.TestCase):
    # Functional tests for the ddbmock Scan endpoint through the Pyramid
    # app, driven over HTTP by webtest; fixtures are seeded straight into
    # the mock database in setUp().
    def setUp(self):
        from ddbmock.database.db import dynamodb
        from ddbmock.database.table import Table
        from ddbmock.database.key import PrimaryKey
        from ddbmock import main
        app = main({})
        from webtest import TestApp
        self.app = TestApp(app)
        dynamodb.hard_reset()
        hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE)
        range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE)
        self.t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key)
        dynamodb.data[TABLE_NAME] = self.t1
        self.t1.put(ITEM1, {})
        self.t1.put(ITEM2, {})
        self.t1.put(ITEM3, {})
        self.t1.put(ITEM4, {})
        self.t1.put(ITEM5, {})
    def tearDown(self):
        from ddbmock.database.db import dynamodb
        dynamodb.hard_reset()
    def test_scan_condition_filter_fields(self):
        # IN filter on `relevant_data`, projecting only that attribute;
        # expects 3 of the 5 scanned items back.
        request = {
            "TableName": TABLE_NAME,
            "ScanFilter": {
                "relevant_data": {
                    "AttributeValueList": [
                        {"S":"toto"},
                        {"S":"titi"},
                        {"S":"tata"},
                    ],
                    "ComparisonOperator": "IN",
                },
            },
            "AttributesToGet": [u'relevant_data'],
        }
        expected = {
            u"Count": 3,
            u"ScannedCount": 5,
            u"Items": [
                {u"relevant_data": {u"S": u"tata"}},
                {u"relevant_data": {u"S": u"toto"}},
                {u"relevant_data": {u"S": u"titi"}},
            ],
            u"ConsumedCapacityUnits": 0.5,
        }
        # Protocol check
        res = self.app.post_json('/', request, headers=HEADERS, status=200)
        self.assertEqual(expected, json.loads(res.body))
        self.assertEqual('application/x-amz-json-1.0; charset=UTF-8',
                         res.headers['Content-Type'])
    def test_scan_count_and_attrs_to_get_fails(self):
        # Count=True combined with AttributesToGet must be rejected with a
        # ValidationException (HTTP 400).
        request = {
            "TableName": TABLE_NAME,
            "ScanFilter": {
                "relevant_data": {
                    "AttributeValueList": [
                        {"S":"toto"},
                        {"S":"titi"},
                        {"S":"tata"},
                    ],
                    "ComparisonOperator": "IN",
                },
            },
            "AttributesToGet": [u'relevant_data'],
            "Count": True,
        }
        expected = {
            u'__type': u'com.amazonaws.dynamodb.v20111205#ValidationException',
            u'message': u'Can not filter fields when only count is requested'
        }
        # Protocol check
        res = self.app.post_json('/', request, headers=HEADERS, status=400)
        self.assertEqual(expected, json.loads(res.body))
        self.assertEqual('application/x-amz-json-1.0; charset=UTF-8',
                         res.headers['Content-Type'])
|
deepfire/partus | cl.py | Python | gpl-3.0 | 412,627 | 0.016662 |
# Builtins management
def python_builtins_dictionary():
    """Return the builtin namespace as a dict, whether __builtins__ is the
    builtins module (scripts) or already a dict (imported modules)."""
    import builtins
    return builtins.getattr(__builtins__, "__dict__", __builtins__)
import collections
class dictator(collections.UserDict):
    """Read-only mapping view over a dict: lookups (item or attribute)
    delegate to the wrapped dict, every mutation raises the wrapped dict's
    own "Exception" type."""
    def __init__(self, dict):
        # Write through __dict__ directly, bypassing the raising __setattr__.
        self.__dict__.update(data = dict)
    def __hasattr__(self, name):
        return name in self.data
    def __getattr__(self, name):
        return self.data[name]
    def _refuse(self):
        raise self.data["Exception"]("Dictator.")
    def __setitem__(self, *_):
        self._refuse()
    def __delitem__(self, *_):
        self._refuse()
    def __setattr__(self, *_):
        self._refuse()
pyb = dictator(python_builtins_dictionary())
# Imports
import re
import os
import io
import _io
import ast
import imp
import pdb
import sys
import math
import time
import trace
import types
import socket
import hashlib
import inspect
import marshal
import builtins
import operator
import platform
import functools
import itertools
import linecache
import threading
import collections
import neutrality
# Lisp symbol <-> primitive name mapping
def lisp_symbol_name_rtname(x):
    """Map a Lisp symbol name to a Python runtime identifier: each of
    '-', '*', ':', '&', '%' becomes '_', and the result is lowercased."""
    translated = ("_" if ch in "-*:&%" else ch for ch in x)
    return "".join(translated).lower()
def lisp_symbol_name_type_rtname(x):
    # Runtime name of the Python type backing the Lisp type named X: same
    # mangling as for values, suffixed "_t" to avoid name clashes.
    return lisp_symbol_name_rtname(x) + "_t"
# Lambda-list keywords whose mangled form starts with "_" (for "&").
common_ands = { "WHOLE", "OPTIONAL", "REST", "BODY", "KEY", "ALLOW-OTHER-KEYS" }
def rtname_lisp_symbol_name(x):
    """Heuristic to (not quite) undo the effect of _lisp_symbol_name_rtname().
    Irreversibles: %."""
    # BUGFIX: two lines of this function were garbled by extraction markers;
    # they are reconstructed here from the inverse of the forward mangling.
    def sub(cs):
        if len(cs) > 1:
            # Classify the mangled shape to recover the original sigils.
            starred = cs[0] == cs[-1] == "_"  # *very-nice*
            anded = cs[0] == "_" != cs[-1] and cs[1:].upper() in common_ands  # &something
            maybe_keyword = cs[-1] != cs[0] == "_" != cs[1]  # :something
            tailed = cs[-1] == "_" != cs[0]  # something-in-conflict
        else:
            starred = anded = maybe_keyword = tailed = False
        pre, post, start, end = (("*", "*", 1, len(cs) - 1) if starred else
                                 ("&", "", 1, None) if anded else
                                 (":", "", 1, None) if maybe_keyword else
                                 ("", "", 0, len(cs) - 1) if tailed else
                                 ("", "", 0, None))
        return (pre +
                "".join("-" if c == "_" else c for c in cs[start:end]) +
                post)
    ret = sub(x).upper()
    return ret
# Default values for optional/key arguments
def defaulted(x, value, type = None):
    """Return X unless it is None, in which case return VALUE.

    When TYPE is given, a non-None X is additionally type-checked.
    """
    if x is None:
        return value
    if type is not None:
        check_type(x, type)  # Not a macro, so cannot access the actual defaulted name..
    return x
def defaulted_to_var(x, variable, type = None):
    # Like DEFAULTED, but the fallback is the dynamic value of VARIABLE
    # (looked up via SYMBOL-VALUE), computed only when X is None.
    # NOTE(review): in the fallback branch X is None, so DEFAULTED never
    # type-checks -- TYPE is effectively dead here; confirm intent.
    return x if x is not None else defaulted(x, symbol_value(variable), type = type)
def defaulted_keys(**keys):
    """For each KEY = (VALUE, DEFAULT) pair, keep VALUE unless it is None,
    in which case substitute DEFAULT; return the resulting dict."""
    result = {}
    for key, (value, default) in keys.items():
        result[key] = value if value is not None else default
    return result
def validate_function_args(desc, f, args):
    """Signal an ERROR unless the positional ARGS fit F's signature.

    DESC names the caller's notion of what F is, for error reporting.
    """
    spec = inspect.getfullargspec(f)
    n_fixed = len(spec.args)
    n_defaults = len(spec.defaults) if spec.defaults else 0
    n_given = len(args)
    if n_given < n_fixed - n_defaults:
        error("Not enough arguments for %s %s: at least %d are required, but %d were provided -- the argspec is %s, the args were %s.",
              desc, f, n_fixed - n_defaults, n_given, spec, args)
    if n_given > n_fixed and not spec.varargs:
        error("Too many arguments for %s %s: at most %d are accepted, but %d were provided -- the argspec is %s, the args were %s.",
              desc, f, n_fixed, n_given, spec, args)
def validate_function_keys(desc, f, keys):
    """Signal an ERROR if KEYS holds keyword names F cannot accept.

    A **kwargs-taking F accepts anything; NIL (the false value) then stands
    for "no invalid keys".
    """
    spec = inspect.getfullargspec(f)
    if spec.varkw:
        invalid = nil
    else:
        invalid = set(keys.keys()) - set(spec.args) - set(spec.kwonlyargs)
    if invalid:
        error("Invalid arguments for %s: %s does not expect keyword arguments %s -- the argspec is %s, the keys were %s.",
              desc, f, ", ".join("'%s'" % x for x in invalid), spec, keys)
# Boot messaging
def fprintf(stream, format_control, *format_args):
    # %-format FORMAT_CONTROL with FORMAT_ARGS and write the result to
    # STREAM through the `neutrality` py2/py3 compatibility layer.
    try:
        neutrality.do_write_string(format_control % format_args, stream)
    except UnicodeEncodeError:
        # Stream rejected the text as-is; retry with explicit UTF-8 bytes.
        neutrality.do_write_string((format_control % format_args).encode("utf-8"), stream)
def dprintf(format_control, *format_args, trailing_newline = True):
    # Debug print: FPRINTF to stderr, appending a newline by default.
    fprintf(sys.stderr, format_control + ("\n" if trailing_newline else ""), *format_args)
# Meta-boot
def global_(x, globals = globals()):
    """Fetch the value bound to name X in this module's globals, else None.

    This is important due to the single namespace, and the consequent
    shadowing of various specifiers.  The keyword default deliberately
    captures globals() once, at definition time.
    """
    return globals[x] if x in globals else None
## 1. trivial enumeration for later DEFUN/DEFCLASS
__boot_defunned__, __boot_defclassed__ = set(), set()
def boot_defun(fn):
    "Record FN for later re-DEFUNing; return it unchanged (decorator)."
    __boot_defunned__.add(fn)
    return fn
def boot_defclass(cls):
    "Record CLS for later DEFCLASS processing; return it unchanged (decorator)."
    __boot_defclassed__.add(cls)
    return cls
## 2. tagged switchables
# Registry of boot-time stand-ins, grouped by tag; consumed by unboot_set().
boot_sets = collections.defaultdict(set)
def boot(set, boot, on_unboot = None):
    # Decorator factory: replace the decorated function ORIG with a stand-in
    # that routes every call through BOOT(orig, ...) until unboot_set(SET)
    # restores ORIG into the module globals.  SET is the grouping tag.
    def definer(orig):
        def unboot():
            # Restore the final definition under its original name.
            globals()[orig.__name__] = orig
            if on_unboot:
                on_unboot()
        def linkage(*args, **keys):
            return boot(orig, *args, **keys)
        # Stash restore metadata on the BOOT callable itself.
        boot.unboot = unboot
        boot.name = orig.__name__
        boot_sets[set].add(boot)
        return linkage
    return definer
def unboot_set(set):
    # Swap every boot-time stand-in registered under tag SET for its final
    # definition (in stable name order), then forget the whole set.
    for x in sorted(boot_sets[set], key = lambda x: x.name):
        if not hasattr(x, "unboot"):
            error("In UNBOOT-SET \"%s\": %s has no 'unboot' attribute.", set, x)
        x.unboot()
    del boot_sets[set]
    # dprintf("; unbooted function set %s, remaining boot sets: %s", repr(set), ", ".join(boot_sets.keys()))
def interpret_toplevel_value(name_or_obj, objness_predicate):
    # Normalize a cold-definition spec into (obj, symbol, in-module name).
    # NAME_OR_OBJ is either the object itself (per OBJNESS_PREDICATE) or a
    # string/symbol naming it.
    name, obj = ((name_or_obj.__name__, name_or_obj) if objness_predicate(name_or_obj) else
                 (name_or_obj, None) if isinstance(name_or_obj, (str, symbol_t)) else
                 error("Bad cold object definition: %s", name_or_obj))
    ####### Thought paused here:
    # ..delay symbol computation!
    # A string name is interned into a symbol; a symbol is mangled back to
    # the runtime (python) name it would occupy in this module.
    sym, inmod_name = ((do_intern(rtname_lisp_symbol_name(name))[0], name) if isinstance(name, str) else
                       (name, lisp_symbol_name_rtname(symbol_name(name))) if isinstance(name, symbol_t) else
                       error("In cold definition of %s: bad name %s for a cold object.", name, repr(name)))
    return obj, sym, inmod_name
# Cold types
# Boot-time aliases binding Lisp-side type names to plain Python types,
# in effect until the real type system is booted.
cold_condition_type = BaseException
cold_error_type = Exception
cold_hash_table_type = dict
cold_stream_type = _io._IOBase
cold_function_type = types.FunctionType.__mro__[0]
cold_string_type = str
def cold_simple_error(format, *args):
    "Boot-time ERROR: raise a plain Exception with a %-formatted message."
    raise cold_error_type(format % args)
def cold_typep(x, type):
    # Boot-time TYPEP: TYPE may be a python type or a symbol carrying one
    # in its `python_type` slot.
    # NOTE(review): the second and third arms test X, not TYPE -- as written,
    # a plain non-match such as cold_typep(3, str) raises ("3 is neither a
    # python type, nor a symbol") instead of returning False.  This looks
    # like the checks should be on the TYPE argument; confirm against
    # callers before changing.
    return isinstance(x, (type if isinstance(x, type) else
                          type.python_type if isinstance(x, symbol_t) else
                          cold_simple_error("%s is neither a python type, nor a symbol.",
                                            x.__repr__())))
def cold_the(type, x):
    # Boot-time THE: return X if it satisfies TYPE, else signal an error.
    if typep(x, type):
        return x
    else:
        # NOTE(review): cold_simple_error raises internally; the `raise`
        # here never sees a value and mainly serves as a reader hint.
        raise cold_simple_error("%s is not a %s.", x.__repr__(), type)
def cold_check_type(x, type):
    # Boot-time CHECK-TYPE: type-assert X, discarding THE's return value.
    the(type, x)
t |
haphaeu/yoshimi | EulerProject/Sqrt2.py | Python | lgpl-3.0 | 1,087 | 0.0092 | '''
!!! THIS IS A MODIFICATION OF AN EULER PROBLEM !!!
Instead of doing what the problem asks, this just
iterates and solves sqrt(2)
Project Euler - Problem 57
It is possible to show that the square root of two can be expressed as an
infinite continued fraction.
2 = 1 + 1/(2 + 1/(2 + 1/(2 + ... ))) = 1.414213...
By expanding this for the first four iterations, we get:
1 + 1/2 = 3/2 = 1.5
1 + 1/(2 + 1/2) = 7/5 = 1.4
1 + 1/(2 + 1/(2 + 1/2)) = 17/12 = 1.41666...
1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29 = 1.41379...
The next three expansions are 99/70, 239/169, and 577/408, but the eighth
expansion, 1393/985 (...)
'''
#this imports the module fractions.py, written by Rafael Rossi
#alternatively, the python module 'fractions' could be imported
#some changes are necessary if the python module is imported
from fractions import *
# Number of continued-fraction expansion iterations to perform.
iters=9
# Innermost term of 2 = 1 + 1/(2 + 1/(2 + ...)): start from 2/1.
# NOTE: fraction/sumFrac/invFrac come from a project-local fractions.py
# (not the stdlib module); this file is Python 2 (print statement below).
sqr2 = fraction(2,1)
for _ in range(iters):
    # One expansion step: x -> 2 + 1/x
    sqr2 = sumFrac(fraction(2,1), invFrac(sqr2))
# Final step adds the leading term: sqrt(2) ~ 1 + 1/x
sqr2 = sumFrac(fraction(1,1), invFrac(sqr2))
print "With %d iterations, sqrt of 2 is %d/%d=%f" % (iters, sqr2.num, sqr2.den,float(sqr2.num)/sqr2.den)
laurentb/weboob | modules/apec/job.py | Python | lgpl-3.0 | 1,051 | 0 | # -*- coding: utf-8 -*-
# Copyright(C) 2016 Bezleputh
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
# Contract-type filter values understood by the APEC job-search backend;
# keys are APEC's internal codes.  (An extraction artifact had garbled the
# '101889' entry; restored here.)
APEC_CONTRATS = {
    ' ': u'-- Indifférent --',
    '101888': u'CDI',
    '101887': u'CDD',
    '101889': u'Interim',
}
# Experience-level filter values understood by the APEC job-search backend;
# keys are APEC's internal codes.
APEC_EXPERIENCE = {
    '101882': u'Tous niveaux d\'expérience',
    '101881': u'Débutant',
    '101883': u'Expérimenté',
}
|
dkotfis/NeuRL | src/nn.py | Python | apache-2.0 | 5,301 | 0.015846 | import numpy as np
class NeuralNet:
    """
    A fully-connected feed-forward neural network trained by backpropagation.

    All layer weights live flattened in the single vector ``self.theta``;
    layer i's matrix has shape (layers[i+1], layers[i] + 1), the extra
    column holding the bias weights.  ``propagate`` caches per-layer
    activations in ``self.activation`` for the subsequent ``update`` call.
    """
    def __init__(self, layers, epsilon=0.154, learningRate = 0.1):
        '''
        Constructor

        Arguments:
            layers - a numpy array of L integers (L is # layers in the network)
            epsilon - one half the interval around zero for setting the initial weights
            learningRate - the learning rate for backpropagation
        '''
        self.layers = layers
        self.epsilon = epsilon
        self.learningRate = learningRate
        self.activation = None  # filled in by propagate()
        self.regLambda = 0.1    # L2 regularization strength
        # random.rand gives random numbers in [0, 1); rescale to (-eps, +eps).
        length = len(layers)
        tht = np.empty((length-1), dtype = object)
        for i in range(0, length-1):
            tht[i] = np.random.rand(layers[i+1], layers[i]+1)
        tht = tht * 2. * epsilon - epsilon
        # Unroll all weight matrices into a single parameter vector.
        self.theta = np.array([])
        for i in range(0, length-1):
            self.theta = np.append(self.theta, tht[i].flatten())
    def update(self, X, Y, a):
        '''
        Backpropagates and adjusts weights based on assigned blame.

        Arguments:
            X is a d-dimensional vector of inputs
            Y is a truth set of outputs
            a is the predicted outputs from propagation
        '''
        layers = self.layers
        length = len(layers)
        # Decode vector theta into per-layer weight matrices.
        theta = np.empty((length-1), dtype=object)
        theta_vec = self.theta
        flag = 0
        for i in range(0, length-1):
            size = layers[i+1] * (layers[i]+1)
            theta[i] = theta_vec[flag:(flag+size)].reshape(layers[i+1], -1)
            flag = flag + size
        grad = np.empty((length-1), dtype=object)
        derivative = np.empty((length-1), dtype=object)
        error = np.empty((length), dtype=object)
        # Activations for layers 0 .. L-1 (with bias nodes), from propagate().
        activation = self.activation
        # Output-layer error is prediction minus truth.
        error[length-1] = a - Y.reshape(-1, 1)
        # Backpropagate the error through the hidden layers.
        for j in range(length-2, 0, -1):
            p1 = np.dot(theta[j].T, error[j+1])
            p2 = np.multiply(activation[j], (1-activation[j]))
            error[j] = np.multiply(p1, p2)
            if j > 1:
                error[j] = error[j][1:]  # drop the bias-node component
        # IMPORTANT: This step makes the network only update from non-zero
        # observed outputs. Typical NN's do not do this.
        # BUGFIX: this previously read `== 0.0` (a no-op comparison), so the
        # masking described above silently never happened; now an assignment.
        # NOTE(review): hidden-layer errors above were computed from the
        # unmasked output error -- confirm that ordering is intended.
        error[length-1][Y == 0.0] = 0.0
        # Gradients: outer product of each layer's error with the upstream
        # activations.
        grad[length-2] = np.dot(error[length-1], activation[length-2].T)
        for k in range(length-3, -1, -1):
            grad[k] = np.dot(error[k+1], activation[k].T)
            if k == 0:
                grad[k] = grad[k][1:, :]  # error[1] kept its bias row; trim it
        # Partial derivatives: gradient plus L2 penalty (bias column exempt).
        for i in range(length-2, -1, -1):
            row, col = theta[i].shape
            temp = np.c_[np.zeros((row, 1)), self.regLambda * theta[i][:, 1:]]
            derivative[i] = grad[i] + temp
        # Gradient-descent step.
        for i in range(0, length-1):
            theta[i] = theta[i] - self.learningRate * derivative[i]
        # Unroll matrices back into a single vector.
        theta_vec = np.array([])
        for i in range(0, length-1):
            theta_vec = np.append(theta_vec, theta[i].flatten())
        self.theta = np.copy(theta_vec)
    def propagate(self, X):
        '''
        Uses the model to predict weighted output values.

        Arguments:
            X is a d-dimensional numpy array
        Returns:
            a c-dimensional numpy array of the strength of each output
        '''
        layers = self.layers
        length = len(layers)
        # Decode vector theta into per-layer weight matrices.
        theta_mat = np.empty((length-1), dtype=object)
        flag = 0
        for i in range(0, length-1):
            size = layers[i+1] * (layers[i]+1)
            theta_mat[i] = self.theta[flag:(flag+size)].reshape(layers[i+1], -1)
            flag = flag + size
        layerX = np.empty((length), dtype=object)
        # Input layer: prepend the bias node, giving shape (d+1, 1).
        layerX[0] = X
        layerX[0] = np.append(1, layerX[0]).reshape(-1, 1)
        for j in range(1, length):
            layerX[j] = self.sigmoid(np.dot(theta_mat[j-1], layerX[j-1]))
            # Add a bias node to every layer except the output layer.
            if j < (length - 1):
                layerX[j] = np.append(1, layerX[j]).reshape(-1, 1)
        # Cache activations for a subsequent update() call.
        self.activation = layerX
        output = layerX[length-1]
        return output
    def sigmoid(self, z):
        '''
        Elementwise logistic function of the n x 1 vector z; exact zeros are
        nudged to 0.0001 to avoid degenerate activations downstream.
        '''
        M = 1. + np.exp(-1. * z)
        result = np.divide(1., M, dtype=float)
        for i in range(0, len(result)):
            if result[i] == 0:
                result[i] += 0.0001
        return result
    def propagateAndUpdate(self, x, y):
        '''
        Forward-propagate a prediction for input x, then update the weights
        against the truth vector y.
        '''
        a = self.propagate(x)
        self.update(x, y, a)
|
rawdlite/mopidy | mopidy/mpd/protocol/music_db.py | Python | apache-2.0 | 16,389 | 0 | from __future__ import absolute_import, unicode_literals
import functools
import itertools
from mopidy.internal import deprecation
from mopidy.models import Track
from mopidy.mpd import exceptions, protocol, translator
_SEARCH_MAPPING = {
'album': 'album',
'albumartist': 'albumartist',
'any': 'any',
'artist': 'artist',
'comment': 'comment',
'composer': 'composer',
'date': 'date',
'file': 'uri',
'filename': 'uri',
'genre': 'genre',
'performer': 'performer',
'title': 'track_name',
'track': 'track_no'}
_LIST_MAPPING = {
'title': 'track',
'album': 'album',
'albumartist': 'albumartist',
'artist': 'artist',
'composer': 'composer',
'date': 'date',
'genre': 'genre',
'performer': 'performer'}
_LIST_NAME_MAPPING = {
'track': 'Title',
'album': 'Album',
'albumartist': 'AlbumArtist',
'artist': 'Artist',
'composer': 'Composer',
'date': 'Date',
'genre': 'Genre',
'performer': 'Performer'}
def _query_from_mpd_search_parameters(parameters, mapping):
query = {}
parameters = list(parameters)
while parameters:
# TODO: does it matter that this is now case insensitive
field = mapping.get(parameters.pop(0).lower())
if not field:
raise exceptions.MpdArgError('incorrect arguments')
if not parameters:
raise ValueError
value = parameters.pop(0)
if value.strip():
query.setdefault(field, []).append(value)
return query
def _get_field(field, search_results):
return list(itertools.chain(*[getattr(r, field) for r in search_results]))
_get_albums = functools.partial(_get_field, 'albums')
_get_artists = functools.partial(_get_field, 'artists')
_get_tracks = functools.partial(_get_field, 'tracks')
def _album_as_track(album):
    # Present an album as a pseudo-track so it can appear in MPD track
    # listings; the "Album: " name prefix distinguishes it from real tracks.
    return Track(
        uri=album.uri,
        name='Album: ' + album.name,
        artists=album.artists,
        album=album,
        date=album.date)
def _artist_as_track(artist):
    # Present an artist as a pseudo-track (same trick as _album_as_track).
    return Track(
        uri=artist.uri,
        name='Artist: ' + artist.name,
        artists=[artist])
@protocol.commands.add('count')
def count(context, *args):
    """
    *musicpd.org, music database section:*

        ``count {TAG} {NEEDLE}``

            Counts the number of songs and their total playtime in the db
            matching ``TAG`` exactly.

    *GMPC:*

    - does not add quotes around the tag argument.
    - use multiple tag-needle pairs to make more specific searches.
    """
    try:
        query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
    except ValueError:
        # A field without a matching value is a protocol-level error here.
        raise exceptions.MpdArgError('incorrect arguments')
    results = context.core.library.search(query=query, exact=True).get()
    result_tracks = _get_tracks(results)
    # Track lengths are in milliseconds; MPD's playtime is whole seconds.
    return [
        ('songs', len(result_tracks)),
        ('playtime', sum(t.length for t in result_tracks if t.length) / 1000),
    ]
@protocol.commands.add('find')
def find(context, *args):
    """
    *musicpd.org, music database section:*

        ``find {TYPE} {WHAT}``

            Finds songs in the db that are exactly ``WHAT``. ``TYPE`` can be any
            tag supported by MPD, or one of the two special parameters - ``file``
            to search by full path (relative to database root), and ``any`` to
            match against all available tags. ``WHAT`` is what to find.

    *GMPC:*

    - does not add quotes around the field argument.
    - also uses ``find album "[ALBUM]" artist "[ARTIST]"`` to list album
      tracks.

    *ncmpc:*

    - does not add quotes around the field argument.
    - capitalizes the type argument.

    *ncmpcpp:*

    - also uses the search type "date".
    - uses "file" instead of "filename".
    """
    # BUGFIX: two lines of this function were garbled by extraction markers
    # (the _query_from_mpd_search_parameters call and the 'performer'
    # condition); reconstructed here.
    try:
        query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
    except ValueError:
        # Dangling field with no value: silently return nothing, like MPD.
        return
    with deprecation.ignore('core.library.search:empty_query'):
        results = context.core.library.search(query=query, exact=True).get()
    result_tracks = []
    # Only pad the result list with artist/album pseudo-tracks when the
    # query did not already constrain on those fields.
    if ('artist' not in query and
            'albumartist' not in query and
            'composer' not in query and
            'performer' not in query):
        result_tracks += [_artist_as_track(a) for a in _get_artists(results)]
    if 'album' not in query:
        result_tracks += [_album_as_track(a) for a in _get_albums(results)]
    result_tracks += _get_tracks(results)
    return translator.tracks_to_mpd_format(result_tracks)
@protocol.commands.add('findadd')
def findadd(context, *args):
    """
    *musicpd.org, music database section:*

        ``findadd {TYPE} {WHAT}``

            Finds songs in the db that are exactly ``WHAT`` and adds them to
            current playlist. Parameters have the same meaning as for ``find``.
    """
    try:
        query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
    except ValueError:
        # Malformed field/value pairing: ignore the command, like ``find``.
        return
    results = context.core.library.search(query=query, exact=True).get()
    with deprecation.ignore('core.tracklist.add:tracks_arg'):
        # TODO: for now just use tracks as other wise we have to lookup the
        # tracks we just got from the search.
        context.core.tracklist.add(tracks=_get_tracks(results)).get()
@protocol.commands.add('list')
def list_(context, *args):
"""
*musicpd.org, music database section:*
``list {TYPE} [ARTIST]``
Lists all tags of the specified type. ``TYPE`` should be ``album``,
``artist``, ``albumartist``, ``date``, or ``genre``.
``ARTIST`` is an optional parameter when type is ``album``,
``date``, or ``genre``. This filters the result list by an artist.
*Clarifications:*
The musicpd.org documentation for ``list`` is far from complete. The
command also supports the following variant:
``list {TYPE} {QUERY}``
Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs
of a field name and a value. If the ``QUERY`` consists of more than one
pair, the pairs are AND-ed together to find the result. Examples of
valid queries and what they should return:
``list "artist" "artist" "ABBA"``
List artists where the artist name is "ABBA". Response::
Artist: ABBA
OK
``list "album" "artist" "ABBA"``
Lists albums where the artist name is "ABBA". Response::
Album: More ABBA Gold: More ABBA Hits
Album: Absolute More Christmas
Album: Gold: Greatest Hits
OK
``list "artist" "album" "Gold: Greatest Hits"``
Lists artists where the album name is "Gold: Greatest Hits".
Response::
Artist: ABBA
OK
``list "artist" "artist" "ABBA" "artist" "TLC"``
Lists artists where the artist name is "ABBA" *and* "TLC". Should
never match anything. Response::
OK
``list "date" "artist" "ABBA"``
Lists dates where artist name is "ABBA". Response::
Date:
Date: 1992
Date: 1993
OK
``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``
Lists dates where artist name is "ABBA" and album name is "Gold:
Greatest Hits". Response::
Date: 1992
OK
``list "genre" "artist" "The Rolling Stones"``
Lists genres where artist name is "The Rolling Stones". Response::
Genre:
Genre: Rock
OK
*GMPC:*
- does not add quotes around the field argument.
*ncmpc:*
- does not add quotes around the field argument.
- capitalizes the field argument.
"""
params = list(args)
if not params:
raise exceptions.MpdArgError('incorrect arguments')
field = params.pop(0).lower()
field = _LIST_MAPPING.get(field)
if field is None:
raise exceptions.MpdArgError('incorrect arguments')
query = None
if len(params) == 1:
if field != 'album':
raise exceptions.MpdArgError('should |
spiside/cqlsh | cqlshlib/copyutil.py | Python | apache-2.0 | 95,453 | 0.003499 | # cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import random
import re
import struct
import sys
import time
import traceback
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from random import randrange
from StringIO import StringIO
from select import select
from threading import Lock
from uuid import UUID
from util import profile_on, profile_off
from cassandra.cluster import Cluster
from cassandra.cqltypes import ReversedType, UserType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cql3handling import CqlRuleSet
from displaying import NO_COLOR_MAP
from formatting import format_value_default, DateTimeFormat, EMPTY, get_formatter
from sslhandling import ssl_settings
PROFILE_ON = False
STRACE_ON = False
IS_LINUX = platform.system() == 'Linux'
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
    """
    :return the normalized path but only if there is a filename, we don't want to convert
    an empty string (which means no file name) to a dot. Also expand any user variables such as ~ to the full path
    """
    if not fname:
        return fname
    return os.path.normpath(os.path.expanduser(fname))
class OneWayChannel(object):
    """
    A one way pipe protected by two process level locks, one for reading and one for writing.
    """
    # (An extraction artifact had garbled the body of send(); restored.)
    def __init__(self):
        self.reader, self.writer = mp.Pipe(duplex=False)
        self.rlock = mp.Lock()  # serializes concurrent readers
        self.wlock = mp.Lock()  # serializes concurrent writers
    def send(self, obj):
        # Hold the write lock so concurrent senders cannot interleave
        # partial pickles on the pipe.
        with self.wlock:
            self.writer.send(obj)
    def recv(self):
        with self.rlock:
            return self.reader.recv()
    def close(self):
        self.reader.close()
        self.writer.close()
class OneWayChannels(object):
    """
    A group of one way channels.
    """
    # (An extraction artifact had garbled the channel-list comprehension in
    # __init__; restored.  This file is Python 2 -- xrange is intentional.)
    def __init__(self, num_channels):
        self.channels = [OneWayChannel() for _ in xrange(num_channels)]
        self._readers = [ch.reader for ch in self.channels]
        self._rlocks = [ch.rlock for ch in self.channels]
        self._rlocks_by_readers = dict([(ch.reader, ch.rlock) for ch in self.channels])
        self.num_channels = num_channels
        # select() works on pipe fds only on Linux; elsewhere fall back to polling.
        self.recv = self.recv_select if IS_LINUX else self.recv_polling
    def recv_select(self, timeout):
        """
        Implementation of the recv method for Linux, where select is available. Receive an object from
        all pipes that are ready for reading without blocking.
        """
        readable, _, _ = select(self._readers, [], [], timeout)
        for r in readable:
            with self._rlocks_by_readers[r]:
                try:
                    yield r.recv()
                except EOFError:
                    continue
    def recv_polling(self, timeout):
        """
        Implementation of the recv method for platforms where select() is not available for pipes.
        We poll on all of the readers with a very small timeout. We stop when the timeout specified
        has been received but we may exceed it since we check all processes during each sweep.
        """
        start = time.time()
        while True:
            for i, r in enumerate(self._readers):
                with self._rlocks[i]:
                    if r.poll(0.000000001):
                        try:
                            yield r.recv()
                        except EOFError:
                            continue
            if time.time() - start > timeout:
                break
    def close(self):
        # Best-effort close of every channel; a channel already torn down by
        # its owner must not prevent closing the rest.
        for ch in self.channels:
            try:
                ch.close()
            except:
                pass
class CopyTask(object):
"""
A base class for ImportTask and ExportTask
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
self.shell = shell
self.ks = ks
self.table = table
self.local_dc = shell.conn.metadata.get_host(shell.hostname).datacenter
self.fname = safe_normpath(fname)
self.protocol_version = protocol_version
self.config_file = config_file
# do not display messages when exporting to STDOUT
self.printmsg = self._printmsg if self.fname is not None or direction == 'from' else lambda _, eol='\n': None
self.options = self.parse_options(opts, direction)
self.num_processes = self.options.copy['numprocesses']
if direction == 'in':
self.num_processes += 1 # add the feeder process
self.printmsg('Using %d child processes' % (self.num_processes,))
self.processes = []
self.inmsg = OneWayChannels(self.num_processes)
self.outmsg = OneWayChannels(self.num_processes)
self.columns = CopyTask.get_columns(shell, ks, table, columns)
self.time_start = time.time()
@staticmethod
def _printmsg(msg, eol='\n'):
sys.stdout.write(msg + eol)
sys.stdout.flush()
def maybe_read_config_file(self, opts, direction):
"""
Read optional sections from a configuration file that was specified in the command options or from the default
cqlshrc configuration file if none was specified.
"""
config_file = opts.pop('configfile', '')
if not config_file:
config_file = self.config_file
if not os.path.isfile(config_file):
return opts
configs = ConfigParser.RawConfigParser()
configs.readfp(open(config_file))
ret = dict()
config_sections = list(['copy', 'copy-%s' % (direction,),
'copy:%s.%s' % (self.ks, self.table),
'copy-%s:%s.%s' % (direction, self.ks, self.table)])
for section in config_sections:
if configs.has_section(section):
options = dict(configs.items(section))
self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
ret.update(options)
# Update this last so the command line options take precedence over the configuration file options
if opts:
self.printmsg("Reading options from the command line: %s" % (opts,))
ret.update(opts)
if self.shell.debug: # this is important for testing, do not remove
self.printmsg("Using options: '%s'" % (ret,))
return ret
@staticmethod
def clean_options(opts):
"""
Convert all option values to valid string literals unless they are path names
"""
return dict([(k, v.decode('string_escape') if k not in ['errfile', 'ratefile'] else v)
for k, v, in opts.iteritems()])
def parse_options(self, opts, direction):
"""
Parse options for import (COPY FROM) and export (COPY TO) operations.
Extract from opts csv and dialect options.
:return: 3 dictionaries: the csv options, the dialect options, any unrecognized options.
"""
shell = se |
etingof/pysnmp | pysnmp/smi/mibs/PYSNMP-USM-MIB.py | Python | bsd-2-clause | 14,623 | 0.000068 | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
# ASN.1 source http://mibs.snmplabs.com:80/asn1/PYSNMP-USM-MIB
# Produced by pysmi-0.4.0 at Thu Feb 14 23:15:36 2019
#
if 'mibBuilder' not in globals():
import sys
sys.stderr.write(__doc__)
sys.exit(1)
(Integer,
OctetString,
ObjectIdentifier) = mibBuilder.importSymbols(
"ASN1",
"Integer",
"OctetString",
"ObjectIdentifier")
(NamedValues,) = mibBuilder.importSymbols(
"ASN1-ENUMERATION",
"NamedValues")
(ConstraintsIntersection,
SingleValueConstraint,
ValueRangeConstraint,
ValueSizeConstraint,
ConstraintsUnion) = mibBuilder.importSymbols(
"ASN1-REFINEMENT",
"ConstraintsIntersection",
"SingleValueConstraint",
"ValueRangeConstraint",
"ValueSizeConstraint",
"ConstraintsUnion")
(pysnmpModuleIDs,) = mibBuilder.importSymbols(
"PYSNMP-MIB",
"pysnmpModuleIDs")
(SnmpAdminString,) = mibBuilder.importSymbols(
"SNMP-FRAMEWORK-MIB",
"SnmpAdminString")
(usmUserEntry,) = mibBuilder.importSymbols(
"SNMP-USER-BASED-SM-MIB",
"usmUserEntry")
(ModuleCompliance,
NotificationGroup) = mibBuilder.importSymbols(
"SNMPv2-CONF",
"ModuleCompliance",
"NotificationGroup")
(Bits,
NotificationType,
Counter64,
Gauge32,
ObjectIdentity,
Unsigned32,
IpAddress,
MibIdentifier,
Counter32,
MibScalar,
MibTable,
MibTableRow,
MibTableColumn,
ModuleIdentity,
iso,
TimeTicks,
Integer32) = mibBuilder.importSymbols(
"SNMPv2-SMI",
"Bits",
"NotificationType",
"Counter64",
"Gauge32",
"ObjectIdentity",
"Unsigned32",
"IpAddress",
"MibIdentifier",
"Counter32",
"MibScalar",
"MibTable",
"MibTableRow",
"MibTableColumn",
"ModuleIdentity",
"iso",
"TimeTicks",
"Integer32")
(TextualConvention,
RowStatus,
DisplayString) = mibBuilder.importSymbols(
"SNMPv2-TC",
"TextualConvention",
"RowStatus",
"DisplayString")
pysnmpUsmMIB = ModuleIdentity(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1)
)
pysnmpUsmMIB.setRevisions(
("2017-04-14 00:00",
"2005-05-14 00:00")
)
pysnmpUsmMIB.setLastUpdated("201704140000Z")
if mibBuilder.loadTexts:
pysnmpUsmMIB.setOrganization("""\
The PySNMP Project
""")
# (An extraction artifact had split the identifier below; restored.)
pysnmpUsmMIB.setContactInfo("""\
E-mail: Ilya Etingof <etingof@gmail.com> GitHub:
https://github.com/etingof/pysnmp
""")
if mibBuilder.loadTexts:
pysnmpUsmMIB.setDescription("""\
This MIB module defines objects specific to User Security Model (USM)
implementation at PySNMP.
""")
# (An extraction artifact had split the identifier below; restored.)
_PysnmpUsmMIBObjects_ObjectIdentity = ObjectIdentity
pysnmpUsmMIBObjects = _PysnmpUsmMIBObjects_ObjectIdentity(
    (1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1)
)
_PysnmpUsmCfg_ObjectIdentity = ObjectIdentity
pysnmpUsmCfg = _PysnmpUsmCfg_ObjectIdentity(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 1)
)
class _PysnmpUsmDiscovery_Type(Integer32):
defaultValue = 1
subtypeSpec = Integer32.subtypeSpec
subtypeSpec += ConstraintsUnion(
SingleValueConstraint(
*(0,
1)
)
)
namedValues = NamedValues(
*(("doDiscover", 1),
("doNotDiscover", 0))
)
_PysnmpUsmDiscovery_Type.__name__ = "Integer32"
_PysnmpUsmDiscovery_Object = MibScalar
pysnmpUsmDiscovery = _PysnmpUsmDiscovery_Object(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 1, 2),
_PysnmpUsmDiscovery_Type()
)
pysnmpUsmDiscovery.setMaxAccess("read-write")
if mibBuilder.loadTexts:
pysnmpUsmDiscovery.setStatus("current")
if mibBuilder.loadTexts:
pysnmpUsmDiscovery.setDescription("""\
Whether SNMP engine would try to figure out the EngineIDs of its peers by
sending discover requests.
""")
class _PysnmpUsmDiscoverable_Type(Integer32):
defaultValue = 1
subtypeSpec = Integer32.subtypeSpec
subtypeSpec += ConstraintsUnion(
SingleValueConstraint(
*(0,
1)
)
)
namedValues = NamedValues(
*(("discoverable", 1),
("notDiscoverable", 0))
)
_PysnmpUsmDiscoverable_Type.__name__ = "Integer32"
_PysnmpUsmDiscoverable_Object = MibScalar
pysnmpUsmDiscoverable = _PysnmpUsmDiscoverable_Object(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 1, 1),
_PysnmpUsmDiscoverable_Type()
)
pysnmpUsmDiscoverable.setMaxAccess("read-write")
if mibBuilder.loadTexts:
pysnmpUsmDiscoverable.setStatus("current")
if mibBuilder.loadTexts:
pysnmpUsmDiscoverable.setDescription("""\
Whether SNMP engine would support its discovery by responding to unknown
clients.
""")
class _PysnmpUsmKeyType_Type(Integer32):
defaultValue = 0
subtypeSpec = Integer32.subtypeSpec
subtypeSpec += ConstraintsUnion(
SingleValueConstraint(
*(0,
1,
2)
)
)
namedValues = NamedValues(
*(("passphrase", 0),
("master", 1),
("localized", 2))
)
_PysnmpUsmKeyType_Type.__name__ = "Integer32"
_PysnmpUsmKeyType_Object = MibScalar
pysnmpUsmKeyType = _PysnmpUsmKeyType_Object(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 1, 3),
_PysnmpUsmKeyType_Type()
)
pysnmpUsmKeyType.setMaxAccess("not-accessible")
if mibBuilder.loadTexts:
pysnmpUsmKeyType.setStatus("current")
if mibBuilder.loadTexts:
pysnmpUsmKeyType.setDescription("""\
When configuring USM user, the value of this enumeration
determines how the keys should be treated. The default
value "passphrase" means that given keys are plain-text
pass-phrases, "master" indicates that the keys are pre-hashed
pass-phrases, while "localized" stands for pre-hashed
pass-phrases mixed with SNMP Security Engine ID value.
""")
_PysnmpUsmSecretTable_Object = MibTable
pysnmpUsmSecretTable = _PysnmpUsmSecretTable_Object(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 2)
)
if mibBuilder.loadTexts:
pysnmpUsmSecretTable.setStatus("current")
if mibBuilder.loadTexts:
pysnmpUsmSecretTable.setDescription("""\
The table of USM users passphrases configured in the SNMP engine's Local
Configuration Datastore (LCD).
""")
_PysnmpUsmSecretEntry_Object = MibTableRow
pysnmpUsmSecretEntry = _PysnmpUsmSecretEntry_Object(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 2, 1)
)
pysnmpUsmSecretEntry.setIndexNames(
(1, "PYSNMP-USM-MIB", "pysnmpUsmSecretUserName"),
)
if mibBuilder.loadTexts:
pysnmpUsmSecretEntry.setStatus("current")
if mibBuilder.loadTexts:
pysnmpUsmSecretEntry.setDescription("""\
Information about a particular USM user credentials.
""")
class _PysnmpUsmSecretUserName_Type(SnmpAdminString):
subtypeSpec = SnmpAdminString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(1, 32),
)
_PysnmpUsmSecretUserName_Type.__name__ = "SnmpAdminString"
_PysnmpUsmSecretUserName_Object = MibTableColumn
pysnmpUsmSecretUserName = _PysnmpUsmSecretUserName_Object(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 2, 1, 1),
_PysnmpUsmSecretUserName_Type()
)
pysnmpUsmSecretUserName.setMaxAccess("not-accessible")
if mibBuilder.loadTexts:
pysnmpUsmSecretUserName.setStatus("current")
if mibBuilder.loadTexts:
pysnmpUsmSecretUserName.setDescription("""\
The username string for which a row in this table represents a configuration.
""")
class _PysnmpUsmSecretAuthKey_Type(OctetString):
subtypeSpec = OctetString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(8, 65535),
)
defaultHexValue = '0000000000000000'
_PysnmpUsmSecretAuthKey_Type.__name__ = "OctetString"
_PysnmpUsmSecretAuthKey_Object = MibTableColumn
pysnmpUsmSecretAuthKey = _PysnmpUsmSecretAuthKey_Object(
(1, 3, 6, 1, 4, 1, 20408, 3, 1, 1, 1, 2, 1, 2),
_PysnmpUsmSecretAuthKey_Type()
)
pysnmpUsmSecretAuthKey.setMaxAccess("not-accessible")
if mibBuilder.loadTexts:
pysnmpUsmSecretAuthKey.setStatus("current")
if mibBuilder.loadTexts:
pysnmpUsmSecretAuthKey.setDescription("""\
User's authentication passphrase used for localized key generation.
""")
class _PysnmpUsmSecretPrivKey_Type(OctetString):
subtypeSpec = OctetString.subtypeSpec
subtypeSpec += ConstraintsUnion(
ValueSizeConstraint(8, 65535),
)
defaultHexValue = ' |
appsembler/edx-platform | lms/djangoapps/teams/tests/test_api.py | Python | agpl-3.0 | 16,102 | 0.003167 | # -*- coding: utf-8 -*-
"""
Tests for Python APIs of the Teams app
"""
from uuid import uuid4
import ddt
import mock
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from lms.djangoapps.teams import api as teams_api
from lms.djangoapps.teams.models import CourseTeam
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
from openedx.core.lib.teams_config import TeamsConfig, TeamsetType
from student.models import CourseEnrollment
from student.roles import CourseStaffRole
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
COURSE_KEY1 = CourseKey.from_string('edx/history/1')
COURSE_KEY2 = CourseKey.from_string('edx/math/1')
TOPIC1 = 'topic-1'
TOPIC2 = 'topic-2'
TOPIC3 = 'topic-3'
DISCUSSION_TOPIC_ID = uuid4().hex
@ddt.ddt
class PythonAPITests(SharedModuleStoreTestCase):
"""
The set of tests for different API endpoints
"""
@classmethod
def setUpClass(cls):
super(PythonAPITests, cls).setUpClass()
cls.user1 = UserFactory.create(username='user1')
cls.user2 = UserFactory.create(username='user2')
cls.user3 = UserFactory.create(username='user3')
cls.user4 = UserFactory.create(username='user4')
topic_data = [
(TOPIC1, TeamsetType.private_managed.value),
(TOPIC2, TeamsetType.open.value),
(TOPIC3, TeamsetType.public_managed.value)
]
topics = [
{
'id': topic_id,
'name': 'name-' + topic_id,
'description': 'desc-' + topic_id,
'type': teamset_type
} for topic_id, teamset_type in topic_data
]
teams_config_1 = TeamsConfig({'topics': [topics[0]]})
teams_config_2 = TeamsConfig({'topics': [topics[1], topics[2]]})
cls.course1 = CourseFactory(
org=COURSE_KEY1.org,
course=COURSE_KEY1.course,
run=COURSE_KEY1.run,
teams_configuration=teams_config_1,
)
cls.course2 = CourseFactory(
org=COURSE_KEY2.org,
course=COURSE_KEY2.course,
run=COURSE_KEY2.run,
teams_configuration=teams_config_2,
)
for user in (cls.user1, cls.user2, cls.user3, cls.user4):
CourseEnrollmentFactory.create(user=user, course_id=COURSE_KEY1)
for user in (cls.user3, cls.user4):
CourseEnrollmentFactory.create(user=user, course_id=COURSE_KEY2)
cls.team1 = CourseTeamFactory(
course_id=COURSE_KEY1,
discussion_topic_id=DISCUSSION_TOPIC_ID,
team_id='team1',
topic_id=TOPIC1,
)
cls.team1a = CourseTeamFactory( # Same topic / team set as team1
course_id=COURSE_KEY1,
team_id='team1a',
topic_id=TOPIC1,
)
cls.team2 = CourseTeamFactory(course_id=COURSE_KEY2, team_id='team2', topic_id=TOPIC2)
cls.team2a = CourseTeamFactory( # Same topic / team set as team2
course_id=COURSE_KEY2,
team_id='team2a',
topic_id=TOPIC2
)
cls.team3 = CourseTeamFactory(course_id=COURSE_KEY2, team_id='team3', topic_id=TOPIC3)
cls.team1.add_user(cls.user1)
cls.team1.add_user(cls.user2)
cls.team2.add_user(cls.user3)
cls.team1a.add_user(cls.user4)
cls.team2a.add_user(cls.user4)
def test_get_team_by_discussion_non_existence(self):
self.assertIsNone(teams_api.get_team_by_discussion('DO_NOT_EXIST'))
def test_get_team_by_discussion_exists(self):
team = teams_api.get_team_by_discussion(DISCUSSION_TOPIC_ID)
self.assertEqual(team, self.team1)
def test_is_team_discussion_private_is_private(self):
self.assertTrue(teams_api.is_team_discussion_private(self.team1))
def test_is_team_discussion_private_is_public(self):
self.assertFalse(teams_api.is_team_discussion_private(None))
self.assertFalse(teams_api.is_team_discussion_private(self.team2))
self.assertFalse(teams_api.is_team_discussion_private(self.team3))
def test_is_instructor_managed_team(self):
self.assertTrue(teams_api.is_instructor_managed_team(self.team1))
self.assertFalse(teams_api.is_instructor_managed_team(self.team2))
self.assertTrue(teams_api.is_instructor_managed_team(self.team3))
def test_is_instructor_managed_topic(self):
self.assertTrue(teams_api.is_instructor_managed_topic(COURSE_KEY1, TOPIC1))
self.assertFalse(teams_api.is_instructor_managed_topic(COURSE_KEY2, TOPIC2))
self.assertTrue(teams_api.is_instructor_managed_topic(COURSE_KEY2, TOPIC3))
def test_user_is_a_team_member(self):
self.assertTrue(teams_api.user_is_a_team_member(self.user1, self.team1))
self.assertFalse(teams_api.user_is_a_team_member(self.user1, None))
self.assertFalse(teams_api.user_is_a_team_member(self.user1, self.team2))
def test_private_discussion_visible_by_user(self):
self.assertTrue(teams_api.discussion_visible_by_user(DISCUSSION_TOPIC_ID, self.user1))
self.assertTrue(teams_api.discussion_visible_by_user(DISCUSSION_TOPIC_ID, self.user2))
# self.assertFalse(teams_api.discussion_visible_by_user(DISCUSSION_TOPIC_ID, self.user3))
def test_public_discussion_visible_by_user(self):
self.assertTrue(teams_api.discussion_visible_by_user(self.team2.discussion_topic_id, self.user1))
self.as | sertTrue(teams_api.discussion_visible_by_user(self.team2.discussion_topic_id, self.user2))
self.assertTrue(teams_api.discussion_visible_by_user('DO_NOT_EXISTS', self.user3))
@ddt.unpack
@ddt.data(
(COURSE_KEY1, TOPIC1, ['team1', 'team1', None, 'team1a']),
(COURSE_KEY1, TOPIC2, [None, None, None, None]),
(COURSE_KEY2, TOPIC1, [None, No | ne, None, None]),
(COURSE_KEY2, TOPIC2, [None, None, 'team2', 'team2a']),
)
def test_get_team_for_user_course_topic(self, course_key, topic_id, expected_team_ids):
user1_team = teams_api.get_team_for_user_course_topic(self.user1, str(course_key), topic_id)
user2_team = teams_api.get_team_for_user_course_topic(self.user2, str(course_key), topic_id)
user3_team = teams_api.get_team_for_user_course_topic(self.user3, str(course_key), topic_id)
user4_team = teams_api.get_team_for_user_course_topic(self.user4, str(course_key), topic_id)
self.assertEqual(user1_team.team_id if user1_team else None, expected_team_ids[0])
self.assertEqual(user2_team.team_id if user2_team else None, expected_team_ids[1])
self.assertEqual(user3_team.team_id if user3_team else None, expected_team_ids[2])
self.assertEqual(user4_team.team_id if user4_team else None, expected_team_ids[3])
@mock.patch('lms.djangoapps.teams.api.CourseTeam.objects')
def test_get_team_multiple_teams(self, mocked_manager):
"""
This is a test for a use case that is very unlikely to occur.
Currently users cannot be in multiple teams in a course, but even after we allow multiple
teams in a course then they should still be limited to one team per topic
"""
mocked_manager.get.side_effect = CourseTeam.MultipleObjectsReturned()
expected_result = "This is somehow the first team"
mock_qs = mock.MagicMock()
mock_qs.first.return_value = expected_result
mocked_manager.filter.return_value = mock_qs
result = teams_api.get_team_for_user_course_topic(self.user1, str(COURSE_KEY1), TOPIC1)
self.assertEqual(result, expected_result)
def test_get_team_course_not_found(self):
team = teams_api.get_team_for_user_course_topic(self.user1, 'nonsense/garbage/nonexistant', 'topic')
self.assertIsNone(team)
def test_get_team_invalid_course(self):
invalid_course_id = 'lol!()#^$&course'
message = 'The supplied course id lol!()#^$&course is not valid'
with self.assertRaise |
bhdouglass/agui | agui/backends/gtk/widgets/button.py | Python | gpl-3.0 | 1,417 | 0.003529 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.awidgets import AButton
from agui.backends.gtk.widgets import Widget
class Button(Widget, AButton):
type = 'Button'
def __init__(self, item = None):
AButton.__init__(self, item)
Widget.__init__(self, item)
self.item.connect('button-press-e | vent', self.emit_pressed)
@AButton.text.gette | r
def text(self):
self._text = self.item.get_label()
return self._text
@text.setter
def text(self, value):
self.item.set_label(value)
self._text = value
@AButton.icon.setter
def icon(self, value):
self.item.set_image(value.icon())
self._icon = value
|
jadams/rarbg-get | rarbg-get.py | Python | mit | 409 | 0.007335 | #!env /usr/bin/python3
import sys
import urllib.parse
import urllib. | request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
| url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
|
PublicaMundi/pycsw | pycsw/log.py | Python | mit | 3,506 | 0 | # -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2011 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
LOGGER = logging.getLogger(__name__)
MSG_FORMAT = '%(asctime)s] [%(levelname)s] file=%(pathname)s \
line=%(lineno)s module=%(module)s function=%(funcName)s %(message)s'
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S'
LOGLEVELS = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
def setup_logger(config=None):
"""Initialize logging facility"""
if config is None:
return None
# Do not proceed if logging has not been set up.
if not (config.has_option('server', 'loglevel') or
config.has_option('server', 'logfile')):
return None
logfile = None
loglevel = 'NOTSET'
if config.has_option('server', 'loglevel'):
loglevel = config.get('server', 'loglevel')
if loglevel not in LOGLEVELS.keys():
raise RuntimeError(
'Invalid server configuration (server | .loglevel).')
if not config.has_option('server', 'logfile'):
raise RuntimeError(
'Invalid server configuration (server.loglevel set,\
but server.logfile missing).')
if config.has_option('server', 'logfile'):
if not config.has_option('server', 'loglevel'):
raise RuntimeError(
'Invalid server co | nfiguration (server.logfile set,\
but server.loglevel missing).')
logfile = config.get('server', 'logfile')
if loglevel != 'NOTSET' and logfile is None:
raise RuntimeError(
'Invalid server configuration \
(server.loglevel set, but server.logfile is not).')
# Setup logging globally (not only for the pycsw module)
# based on the parameters passed.
logging.basicConfig(level=LOGLEVELS[loglevel],
filename=logfile,
datefmt=TIME_FORMAT,
format=MSG_FORMAT)
LOGGER.info('Logging initialized (level: %s).' % loglevel)
if loglevel == 'DEBUG': # turn on CGI debugging
LOGGER.info('CGI debugging enabled.')
import cgitb
cgitb.enable()
|
otherway/sepa-tools | account_payment_export/model/payment_mode.py | Python | agpl-3.0 | 1,830 | 0.001093 | # -*- coding: utf-8 -*-
######################################## | ######################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
#
# All other contributions are (C) by their respective contributo | rs
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class payment_mode(orm.Model):
''' Restoring the payment type from version 5,
used to select the export wizard (if any) '''
_inherit = "payment.mode"
def suitable_bank_types(self, cr, uid, payment_mode_id=None, context=None):
""" Reinstates functional code for suitable bank type filtering.
Current code in account_payment is disfunctional.
"""
res = []
payment_mode = self.browse(
cr, uid, payment_mode_id, context)
if (payment_mode and payment_mode.type and
payment_mode.type.suitable_bank_types):
res = [t.code for t in payment_mode.type.suitable_bank_types]
return res
|
ttakamura/chainer | examples/imagenet/train_imagenet.py | Python | mit | 9,557 | 0.002616 | #!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images and scale them to 256x256, and make two lists of space-
separated CSV whose first column is full path to image and second column is
zero-origin label (this format is same as that used by Caffe's ImageDataLayer).
"""
import argparse
import cPickle as pickle
from datetime import timedelta
import json
import math
from multiprocessing import Pool
from Queue import Queue
import random
import sys
from threading import Thread
import time
import cv2
import numpy as np
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Path to the mean file (computed by compute_mean.py)')
parser.add_argument('--arch', '-a', default='nin',
help='Convnet architecture (nin, alexbn, googlenet, googlenetbn)')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--epoch', '-E', default=10, type=int,
help='Number of epochs to learn')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--loaderjob', '-j', default=20, type=int,
help='Number of parallel data loading processes')
parser.add_argument('--out', '-o', default='model',
help='Path to save model on each validation')
args = parser.parse_args()
assert 50000 % args.val_batchsize == 0
# Prepare dataset
def load_image_list(path):
tuples = []
for line in open(path):
pair = line.strip().split()
tuples.append((pair[0], np.int32(pair[1])))
return tuples
train_list = load_image_list(args.train)
val_list = load_image_list(args.val)
mean_image = pickle.load(open(args.mean, 'rb'))
# Prepare model
if args.arch == 'nin':
import nin
model = nin.NIN()
elif args.arch == 'alexbn':
import alexbn
model = alexbn.AlexBN()
elif args.arch == 'googlenet':
import inception
model = inception.GoogLeNet()
elif args.arch == 'googlenetbn':
import inceptionbn
model = inceptionbn.GoogLeNetBN()
else:
raise ValueError('Invalid architecture name')
if args.gpu >= 0:
cuda.init(args.gpu)
model.to_gpu()
# Setup optimizer
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model.collect_parameters())
# ------------------------------------------------------------------------------
# This example consists of three threads: data feeder, logger and trainer. These
# communicate with each other via Queue.
data_q = Queue(maxsize=1)
res_q = Queue()
# Data loading routine
cropwidth = 256 - model.insize
def read_image(path, center=False, flip=False):
image = cv2.imread(path).transpose(2, 0, 1)
if center:
top = left = cropwidth / 2
else:
top = random.randint(0, cropwidth - 1)
left = random.randint(0, cropwidth - 1)
bottom = model.insize + top
right = model.insize + left
image = image[[2, 1, 0], top:bottom, left:right].astype(np.float32)
image -= mean_image[:, top:bottom, left:right]
image /= 255
if flip and random.randint(0, 1) == 0:
return image[:, :, ::-1]
else:
return image
# Data feeder
def feed_data():
i = 0
count = 0
x_batch = np.ndarray(
(args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
y_batch = np.ndarray((args.batchsize,), dtype=np.int32)
val_x_batch = np.ndarray(
(args.val_batchsize, 3, model.insize, model.insize), dtype=np.float32)
val_y_batch = np.ndarray((args.val_batchsize,), dtype=np.int32)
batch_pool = [None] * args.batchsize
val_batch_pool = [None] * args.val_batchsize
pool = Pool(args.loaderjob)
data_q.put('train')
for epoch in xrange(1, 1 + args.epoch):
print >> sys.stderr, 'epoch', epoch
print >> sys.stderr, 'learning rate', optimizer.lr
perm = np.random.permutation(len(train_list))
for idx in perm:
path, label = train_list[idx]
batch_pool[i] = pool.apply_async(read_image, (path, False, True))
y_batch[i] = label
i += 1
if i == args.batchsize:
for j, x in enumerate(batch_pool):
x_batch[j] = x.get()
data_q.put((x_batch.copy(), y_batch.copy()))
i = 0
count += 1
if count % 100000 == 0:
data_q.put('val')
j = 0
for path, label in val_list:
val_batch_pool[j] = pool.apply_async(
read_image, (path, True, False))
val_y_batch[j] = label
j += 1
if j == args.val_batchsize:
for k, x in enumerate(val_batch_pool):
val_x_batch[k] = x.get()
data_q.put((val_x_batch.copy(), val_y_batch.copy()))
j = 0
data_q.put('train')
optimizer.lr *= 0.97
pool.close()
pool.join()
data_q.put('end')
# Logger
def log_result():
train_count = 0
train_cur_loss = 0
train_cur_accuracy = 0
begin_at = time.time()
val_begin_at = None
while True:
result = res_q.get()
if result == 'end':
print >> sys.stderr, ''
break
elif result == 'train':
print >> sys.stderr, ''
train = True
if val_begin_at is not None:
begin_at += time.time() - val_begin_at
val_begin_at = None
continue
elif result == 'val':
print >> sys.stderr, ''
train = False
val_count = val_loss = val_accuracy = 0
val_begin_at = time.time()
continue
loss, accuracy = result
if train:
train_count += 1
duration = time.time() - begin_at
throughput = train_count * args.batchsize / duration
sys.stderr.write(
'\rtrain {} updates ({} samples) time: {} ({} images/sec)'
.format(train_count, train_count * args.batchsize,
timedelta(seconds=duration), throughput))
train_cur_loss += loss
train_cur_accuracy += accuracy
if train_count % 1000 == 0:
mean_loss = train_cur_loss / 1000
mean_error = 1 - train_cur_accuracy / 1000
print >> sys.stderr, ''
print json.dumps({'type': 'train', 'iteration': train_count,
'error': mean_error, 'loss': mean_loss})
sys.stdout.flush()
train_cur_loss = 0
train_cur_accuracy = 0
else:
val_count += args.val_batchsize
duration = time.time() - val_b | egin_at
throughput = val_count / duration
sys.stderr.write(
'\rval {} batches ({} samples) time: {} ({} images/sec)'
.format(val_count / ar | gs.val_batchsize, val_count,
timedelta(seconds=duration), throughput))
val_loss += loss
val_accuracy += accuracy
if val_count == 50000:
mean_loss = val_loss * args.val_batchsize / 50000
mean_error = 1 - val_accuracy * args.val_batchsize / 50000
print >> sys.stderr, ''
print json.dumps({'type': 'val', 'iteration': train_count |
alladdin/plugin.video.primaplay | libPrimaPlay/PrimaPlay_unittest.py | Python | gpl-2.0 | 12,829 | 0.006255 | # -*- coding: utf-8 -*-
import unittest
import os, sys
import PrimaPlay
import urllib2
os.chdir(os.path.dirname(sys.argv[0]))
user = 'text@example.com';
password = 'password';
class mockTime:
def time(self):
return 1450875766
class mockUserAgent:
def __init__(self, url_map = {}):
self.ua = PrimaPlay.UserAgent()
self.url_map = {
'http://api.play-backend.iprima.cz/prehravac/init?_ts=1450875766&_infuse=1&productId=p135603': lambda url: 'test_player_init.js',
'http://play.iprima.cz/': lambda url: 'test_homepage.html',
'http://play.iprima.cz': lambda url: 'test_homepage.html',
'http://play.iprima.cz/prostreno': lambda url: 'test_filters.html',
'http://play.iprima.cz/vysledky-hledani-vse?query=prostreno': lambda url: 'test_search_page.html',
'http://play.iprima.cz/prostreno-IX-9': lambda url: 'test_video_page.html',
'http://play.iprima.cz/moje-play': lambda url: 'test_moje_play.html',
'https://play.iprima.cz/tdi/login/nav/form?csrfToken=868668da5dd5d622ddee5738cf226523ccc6b708-1451918185394-55fbc39b6ea5a369d8723b76': lambda url: 'test_homepage_logged.html',
'http://play.iprima.cz/prostreno?cat[]=EPISODE&src=p14877&sort[]=Rord&sort[]=latest': lambda url: 'test_prostreno_epizody.html',
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8': lambda url: self.raise_not_found(url),
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0001/4844/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8': lambda url: 'test_homepage.html',
'http://api.play-backend.iprima.cz/prehravac/init?_ts=1450875766&_infuse=1&productId=p148175': lambda url: 'test_player_init-2.js',
'http://play.iprima.cz/cestovani-cervi-dirou-s-morganem-freemanem-ii-9': lambda url: 'test_video_page-2.html',
'http://play.iprima.cz/prostreno?season=p14894&action=remove': lambda url: 'test_remove_all_filters.html',
'https://play.iprima.cz/tdi/dalsi?filter=allShows&sort[]=title&offset=54': lambda url: 'test_ajax_response.data',
'https://play.iprima.cz/tdi/dalsi/prostreno?season=p14877&sort[]=Rord&sort[]=latest&offset=18': lambda url: 'test_ajax_response_p.data'
}
self.url_map.update(url_map)
def get(self, url):
filename = self._get_filename_from_map(url)
return self._get_cache(filename)
def post(self, url, params):
filename = self._get_filename_from_map(url)
return self._get_cache(filename)
def _get_filename_from_map(self, url):
if not self.url_map.has_key(url):
print "ERROR! not found in url map: " + url
raise urllib2.HTTPError(url, 500, 'Internal server error', None, None)
return
get_url = self.url_map[url]
return get_url(url)
def _get_cache(self, filename):
fl = open(filename, 'r')
content = fl.read()
return content
def raise_not_found(self, url):
raise urllib2.HTTPError(url, 404, 'Not found', None, None)
class PrimaPlayUnitTest(unittest.TestCase):
def setUp(self):
pass
def test_get_player_init_link(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
self.assertEqual(prima_play.get_player_init_url('p135603'),
'http://api.play-backend.iprima.cz/prehravac/init?_ts=1450875766&_infuse=1&productId=p135603')
def test_get_video_link__sd(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
self.assertEqual(prima_play.get_video_link('p135603'),
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4.smil/playlist.m3u8')
def test_get_video_link__hd(self):
prima_play = PrimaPlay.Parser(mockUserAgent({
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8': lambda url: 'test_homepage.html',
}), mockTime())
self.assertEqual(prima_play.get_video_link('p135603'),
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8')
def test_get_video_link__force_sd(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime(), False)
self.assertEqual(prima_play.get_video_link('p135603'),
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4.smil/playlist.m3u8')
def test_get_next_list(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
next_list = prima_play.get_next_list('https://play.iprima.cz/tdi/dalsi?filter=allShows&sort[]=title&offset=54')
self.assertEqual(next_list.next_link,
'https://play.iprima.cz/tdi/dalsi?filter=allShows&sort[]=title&offset=72')
self.assertEqual(len(next_list.list), 18)
self.assertEqual(next_list.list[0].title, u'Největší esa mafie 1 Epizoda')
self.assertEqual(next_list.list[0].link, 'http://play.iprima.cz/nejvetsi-esa-mafie-1')
def test_get_next_list_series(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
next_list = prima_play.get_next_list('https://play.iprima.cz/tdi/dalsi/prostreno?season=p14877&sort[]=Rord&sort[]=latest&offset=18')
self.assertEqual(next_list.next_link,
'https://play.iprima.cz/tdi/dalsi/prostreno?season=p14877&sort[]=Rord&sort[]=latest&offset=36')
def test_get_page__player_page(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
page = prima_play.get_page('http://play.iprima.cz/prostreno-IX-9')
self.assertEqual(page.player.title, u'Prostřeno!')
self.assertEqual(page.player.video_link,
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4.smil/playlist.m3u8')
self.assertEqual(page.player.image_url,
'http://static.play-backend.iprima.cz/cdn/img/splash169/p135609-p183945/l_xhdpi')
self.assertEqual(page.player.description,
'Zábavná porce vašeho oblíbeného pořadu Prostřeno!')
self.assertEqual(page.player.broadcast_date, '16.12.2015')
self.assertEqual(page.player.duration, '42 min')
self.assertEqual(page.player.year, '2015')
self.assertEqual(len(page.video_lists), 2)
self.assertEqual(page.video_lists[0].title, u'Další epizody')
self.assertEqual(page.video_lists[0].link,
'http://play.iprima.cz/prostreno-IX-9?season=p135603&sort[]=ord&sort[]=Rlatest')
self.a | ssertEqual(len(page.video_lists[0].item_list), 20)
self.assertEqual(page.video_lists[0].item_list[0].title,
u'Prostřeno! Sezóna 12: Epizoda 9')
self. | assertEqual(page.video_lists[0].item_list[0].link,
'http://play.iprima.cz/prostreno/videa/prostreno-xii-9')
def test_get_page__player_page_2(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
page = prima_play.get_page('http://play.iprima.cz/cestovani-cervi-dirou-s-morganem-freemanem-ii-9')
self.assertEqual(page.player.title, u'Cestování červí dírou s Morganem Freemanem II (7)')
self.assertEqual(page.player.video_link,
'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0001/4844/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8')
self.assertEqual(page.player.image_url, None)
def test_get_page__homepage(self):
prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
page = prima_play.get_page('http://play.iprima.cz')
self.assertEqual(page.player, None)
self.assertEqual(len(page.video_lists), 8)
self.assertEqual(page.video_lists[1].title, u'Pořady a Seriály')
self.assertEqual(page.video_lists[1].link, None)
self.assertEqual(len(page.video_lists[1].item_list), 19)
self.assertEqual(page.video_lists[1].item_list[0].title,
u'Ohnivý kuře 32 Epizod')
|
atarola/pyjojo | pyjojo/util.py | Python | mit | 1,563 | 0.006398 | #!/usr/bin/env python
import os
import pkgutil
import logging
import sys
import tornado.web
from pyjojo.config import config
from pyjojo.scripts import create_collection
log = logging.getLogger(__name__)
class route(object):
"""
decorates RequestHandlers and builds up a list of routables handlers
From: https://gist.github.com/616347
"""
_routes = []
def __init__(self, uri, name=None):
self._uri = uri
self.name = name
def __call__(self, _handler):
"""gets called when we class decorate"""
log.info("Binding {0} to route {1}".format(_handler.__name__, self._uri))
| name = self.name and self.name or _handler.__name__
self._routes.append(tornado.web.url(self._uri, _handler, name=name))
return _handler
@classmethod
def get_routes(self):
return self._routes
def setup_logging():
""" setup the logging system """
base_log = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"))
base | _log.addHandler(handler)
base_log.setLevel(logging.DEBUG)
return handler
def create_application(debug):
# import the handler file, this will fill out the route.get_routes() call.
import pyjojo.handlers
application = tornado.web.Application(
route.get_routes(),
scripts=create_collection(config['directory']),
debug=debug
)
return application
|
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/pip/_vendor/html5lib/_inputstream.py | Python | mit | 32,532 | 0.0004 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, binary_type
from pip._vendor.six.moves import http_client, urllib
import codecs
import re
from pip._vendor import webencodings
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import ReparseException
from . import _utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
# Use one extra step of indirection and create surrogates with
# eval. Not using this indirection would introduce an illegal
# unicode literal on platforms not supporting such lone
# surrogates.
assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used
"]")
else:
invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """

    def __init__(self, stream):
        # The wrapped (unbuffered) stream.
        self.stream = stream
        # List of byte chunks already read from the stream.
        self.buffer = []
        # Current read position as [chunk index, offset within chunk];
        # [-1, 0] means "nothing buffered yet".
        self.position = [-1, 0]  # chunk number, offset

    def tell(self):
        """Return the absolute position: full chunks before the current
        one plus the offset inside the current chunk."""
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos

    def seek(self, pos):
        """Move the read position to absolute offset *pos*.

        Only positions inside already-buffered data are valid (asserted);
        the stream itself is never rewound.
        """
        assert pos <= self._bufferedBytes()
        offset = pos
        i = 0
        # Walk chunks until the remaining offset fits in chunk i.
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]

    def read(self, bytes):
        """Read up to *bytes* bytes, serving from the buffer when the
        position lies inside it and from the stream otherwise."""
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            # Positioned exactly at the end of the buffered data.
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        """Total number of bytes currently held in the buffer."""
        return sum([len(item) for item in self.buffer])

    def _readStream(self, bytes):
        """Read a fresh chunk from the stream, append it to the buffer and
        position at its end."""
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        """Serve *bytes* bytes starting at the current position, spanning
        chunk boundaries, and fall through to the stream for any
        remainder."""
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                # Request satisfied within this chunk.
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                # Take the rest of this chunk and continue with the next.
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            # Buffer exhausted — read the remainder directly from the stream.
            rv.append(self._readStream(remainingBytes))
        return b"".join(rv)
def HTMLInputStream(source, **kwargs):
    """Factory: return a unicode or binary HTML input stream for *source*.

    *source* may be a file-like object, an HTTPResponse, or a plain
    (byte or unicode) string; extra keyword arguments are forwarded to the
    chosen stream class.

    NOTE(review): http_client, urllib and text_type are imported earlier in
    the module (not visible in this chunk).
    """
    # Work around Python bug #20007: read(0) closes the connection.
    # http://bugs.python.org/issue20007
    if (isinstance(source, http_client.HTTPResponse) or
            # Also check for addinfourl wrapping HTTPResponse
            (isinstance(source, urllib.response.addbase) and
             isinstance(source.fp, http_client.HTTPResponse))):
        isUnicode = False
    elif hasattr(source, "read"):
        # File-like: probe the type of a zero-length read.
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)

    if isUnicode:
        # Byte-oriented encoding hints make no sense for unicode input.
        encodings = [x for x in kwargs if x.endswith("_encoding")]
        if encodings:
            raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
        return HTMLUnicodeInputStream(source, **kwargs)
    else:
        return HTMLBinaryInputStream(source, **kwargs)
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """

        if not _utils.supports_lone_surrogates:
            # Such platforms will have already checked for such
            # surrogate errors, so no need to do this checking.
            self.reportCharacterErrors = None
        elif len("\U0010FFFF") == 1:
            # Wide build: a non-BMP character is a single code unit, so the
            # UCS-4 error reporter can be used.
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            # Narrow build: non-BMP characters appear as surrogate pairs.
            self.reportCharacterErrors = self.characterErrorsUCS2

        # List of where new lines occur
        self.newLines = [0]

        # Unicode input is by definition already decoded.
        self.charEncoding = (lookupEncoding("utf-8"), "certain")
        self.dataStream = self.openStream(source)

        self.reset()
    def reset(self):
        """Reset all per-scan bookkeeping so the stream can be re-read."""
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []

        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0

        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read' | ):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfi |
JBmiog/IOT | server_script/tests/email_handler_tests.py | Python | gpl-3.0 | 279 | 0.003584 | import unittest
import email_hand | ler
class EmailHandlerTest(unittest.TestCase):
    """Exercises email_handler.Emailer end to end."""

    def test_tx_email(self):
        """Sending a minimal subject-only message should report success."""
        message = "Subject: License plates scan results\n\n"
        mailer = email_handler.Emailer()
        self.assertTrue(mailer.tx_email(message))
LyleMi/Trafficker | Trafficker/packets/packet.py | Python | mit | 5,253 | 0.000381 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from Trafficker.layer.arp import ARP
from Trafficker.layer.cldap import CLDAP
from Trafficker.layer.dns import DNS
from Trafficker.layer.http import HTTP
from Trafficker.layer.icmp import ICMP
from Trafficker.layer.igmp import IGMP
from Trafficker.layer.ip import IP
from Trafficker.layer.ipv6 import IPv6
from Trafficker.layer.mac import ETHER
from Trafficker.layer.tcp import TCP
from Trafficker.layer.udp import UDP
from Trafficker.layer.smtp import SMTP
from Trafficker.layer.pop import POP
from Trafficker.layer.vlan import VLAN
from Trafficker.layer.ntp import NTP
from Trafficker.packets.buffer import Buffer
class Packet(object):
    """traffic packet

    Parses raw packet bytes layer by layer (Ethernet -> optional VLAN ->
    IP/ARP/IPv6 -> TCP/UDP/ICMP/IGMP -> application protocol guessed from
    well-known ports), collecting the decoded layer objects in
    ``self.layers`` and convenience fields (srcip/dstip/srcp/dstp/protocol)
    on the instance.
    """

    def __init__(self, data, header=None):
        """Parse *data* (raw packet bytes).

        *header*, when given, is a 16-byte pcap record header; it is
        prepended to ``self.raw`` and unpacked as four unsigned ints
        (seconds, microseconds, captured length, original length).
        """
        super(Packet, self).__init__()
        self.raw = data
        self.header = {}
        if header is not None:
            self.raw = header + self.raw
            header = Buffer(header)
            self.header['GMTtime'], self.header['MicroTime'], self.header['caplen'], self.header['len'] = header.unpack("IIII")
            print(self.header)
        data = Buffer(data)
        mac = ETHER.unpack(data.get(14))  # 14-byte Ethernet header
        self.len = len(data)
        self.mac = mac
        self.layers = [mac]
        # Defaults for packets whose layers do not fill these in.
        self.srcip = ""
        self.dstip = ""
        self.protocol = ""
        self.srcp = 0
        self.dstp = 0
        ntype = mac.type
        if mac.type == ETHER.ethertypes["VLAN"]:
            # 802.1Q tag: the effective ethertype follows the 4-byte tag.
            vlan = VLAN.unpack(data.get(4))
            self.layers.append(vlan)
            ntype = vlan.type
        if ntype == ETHER.ethertypes["IPv4"]:
            ip = IP.unpack(data.get(20))
            self.srcip = ip.ssrc
            self.dstip = ip.sdst
            self.layers.append(ip)
            self.protocol = ip.sprotocol
            if ip.protocol == IP.Protocol.TCP:
                # ip.tl - 40: payload length assuming 20-byte IP and TCP
                # headers — TODO confirm against TCP.unpack.
                tcp = TCP.unpack(data, ip.tl - 40)
                self.srcp = tcp.srcp
                self.dstp = tcp.dstp
                self.layers.append(tcp)
                # Application protocol guessed from well-known ports.
                if 80 in [tcp.srcp, tcp.dstp]:
                    self.protocol = "HTTP"
                    # http = HTTP.unpack(tcp.payload)
                    http = HTTP()
                    self.layers.append(http)
                elif 25 in [tcp.srcp, tcp.dstp]:
                    self.protocol = "SMTP"
                    smtp = SMTP.unpack(tcp.payload)
                    self.layers.append(smtp)
                elif 110 in [tcp.srcp, tcp.dstp]:
                    self.protocol = "POP"
                    pop = POP.unpack(tcp.payload)
                    self.layers.append(pop)
            elif ip.protocol == IP.Protocol.UDP:
                udp = UDP.unpack(data.get(8))
                self.srcp = udp.src
                self.dstp = udp.dst
                self.layers.append(udp)
                if 53 in [udp.dst, udp.src]:
                    self.protocol = "DNS"
                    dns = DNS.unpack(data)
                    self.layers.append(dns)
                elif 389 in [udp.dst, udp.src]:
                    self.protocol = "CLDAP"
                    cldap = CLDAP.unpack(data)
                    self.layers.append(cldap)
                elif 123 in [udp.dst, udp.src]:
                    self.protocol = "NTP"
                    ntp = NTP.unpack(data)
                    self.layers.append(ntp)
                else:
                    # Unknown UDP service: keep the raw payload.
                    udp.payload = data.getremain()
            elif ip.protocol == IP.Protocol.ICMP:
                icmp = ICMP.unpack(data.getremain())
                self.layers.append(icmp)
            elif ip.protocol == IP.Protocol.IGMP:
                igmp = IGMP.unpack(data.getremain())
                self.layers.append(igmp)
        elif ntype == ETHER.ethertypes["ARP"]:
            self.protocol = "ARP"
            arp = ARP.unpack(data.get(28))
            self.layers.append(arp)
            self.srcip = arp.sip
            self.dstip = arp.dip
        elif ntype == ETHER.ethertypes["IPv6"]:
            self.protocol = "IPv6"
            ipv6 = IPv6.unpack(data.get(40))
            self.layers.append(ipv6)
            self.srcip = ipv6.sip
            self.dstip = ipv6.dip
        elif ntype not in ETHER.ethertypes.values():
            print('Unsupport type %s' % ntype)
        else:
            # Known ethertype with no dedicated parser: record its name only.
            for etype in ETHER.ethertypes:
                if ntype == ETHER.ethertypes[etype]:
                    self.protocol = etype
                    break

    def json(self):
        """Return a JSON-serialisable dict: raw bytes as hex, the address /
        protocol summary fields and one dict per decoded layer."""
        ret = {}
        ret['raw'] = self.raw.hex()
        ret['srcip'] = self.srcip
        ret['dstip'] = self.dstip
        ret['protocol'] = self.protocol
        ret['layers'] = []
        for l in self.layers:
            ret['layers'].append(l.json())
        return ret

    def __repr__(self):
        # Timestamp is only available when a pcap record header was parsed.
        if 'GMTtime' in self.header:
            timearray = time.localtime(self.header['GMTtime'])
            timestr = time.strftime("%Y-%m-%d %H:%M:%S", timearray)
        else:
            timestr = ''
        return "<[%s] %s %s(%s):%s -> %s(%s):%s>" % (
            timestr,
            self.protocol,
            self.srcip,
            self.mac.srcmac,
            self.srcp,
            self.dstip,
            self.mac.dstmac,
            self.dstp
        )
|
calpeyser/google-cloud-python | trace/google/cloud/trace/client.py | Python | apache-2.0 | 6,103 | 0 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Stackdriver Trace API."""
from google.cloud.trace._gax import make_gax_trace_api
from google.cloud.client import ClientWithProject
from google.cloud._helpers import _datetime_to_pb_timestamp
class Client(ClientWithProject):
    """Client to bundle configuration needed for API requests.

    :type project: str
    :param project: The project which the client acts on behalf of.
                    If not passed, falls back to the default inferred from
                    the environment.

    :type credentials: :class:`~google.auth.credentials.Credentials`
    :param credentials: (Optional) The OAuth2 Credentials to use for this
                        client. If not passed, falls back to the default
                        inferred from the environment.
    """

    # Lazily created (and cached) gax trace API helper.
    _trace_api = None

    def __init__(self, project=None, credentials=None):
        super(Client, self).__init__(
            project=project, credentials=credentials)

    @property
    def trace_api(self):
        """Helper for trace-related API calls.

        See
        https://cloud.google.com/trace/docs/reference/v1/rpc/google.devtools.
        cloudtrace.v1

        Bug fix: the helper is now created once and cached in
        ``_trace_api``; previously it was rebuilt on every property access,
        which made the class-level cache attribute useless.
        """
        if self._trace_api is None:
            self._trace_api = make_gax_trace_api(self)
        return self._trace_api

    def patch_traces(self, traces, project_id=None, options=None):
        """Sends new traces to Stackdriver Trace or updates existing traces.

        :type traces: dict
        :param traces: The traces to be patched in the API call.

        :type project_id: str
        :param project_id: (Optional) ID of the Cloud project where the trace
                           data is stored. Defaults to the client's project.

        :type options: :class:`~google.gax.CallOptions`
        :param options: (Optional) Overrides the default settings for this
                        call, e.g, timeout, retries etc.
        """
        if project_id is None:
            project_id = self.project

        self.trace_api.patch_traces(
            project_id=project_id,
            traces=traces,
            options=options)

    def get_trace(self, trace_id, project_id=None, options=None):
        """Gets a single trace by its ID.

        :type trace_id: str
        :param trace_id: ID of the trace to return.

        :type project_id: str
        :param project_id: (Optional) ID of the Cloud project where the trace
                           data is stored. Defaults to the client's project.

        :type options: :class:`~google.gax.CallOptions`
        :param options: (Optional) Overrides the default settings for this
                        call, e.g, timeout, retries etc.

        :rtype: dict
        :returns: A Trace dict.
        """
        if project_id is None:
            project_id = self.project

        return self.trace_api.get_trace(
            project_id=project_id,
            trace_id=trace_id,
            options=options)

    def list_traces(
            self,
            project_id=None,
            view=None,
            page_size=None,
            start_time=None,
            end_time=None,
            filter_=None,
            order_by=None,
            page_token=None):
        """Returns of a list of traces that match the filter conditions.

        :type project_id: str
        :param project_id: (Optional) ID of the Cloud project where the trace
                           data is stored. Defaults to the client's project.

        :type view: :class:`google.cloud.gapic.trace.v1.enums.
                    ListTracesRequest.ViewType`
        :param view: (Optional) Type of data returned for traces in the list.
                     Default is ``MINIMAL``.

        :type page_size: int
        :param page_size: (Optional) Maximum number of traces to return.
                          If not specified or <= 0, the implementation selects
                          a reasonable value.

        :type start_time: :class:`~datetime.datetime`
        :param start_time: (Optional) Start of the time interval (inclusive)
                           during which the trace data was collected.

        :type end_time: :class:`~datetime.datetime`
        :param end_time: (Optional) End of the time interval (inclusive)
                         during which the trace data was collected.

        :type filter_: str
        :param filter_: (Optional) An optional filter for the request.

        :type order_by: str
        :param order_by: (Optional) Field used to sort the returned traces.

        :type page_token: str
        :param page_token: opaque marker for the next "page" of entries. If
                           not passed, the API will return the first page.

        :rtype: :class:`~google.api.core.page_iterator.Iterator`
        :returns: Traces that match the specified filter conditions.
        """
        if project_id is None:
            project_id = self.project

        # Convert datetimes to protobuf Timestamps at the API boundary.
        if start_time is not None:
            start_time = _datetime_to_pb_timestamp(start_time)

        if end_time is not None:
            end_time = _datetime_to_pb_timestamp(end_time)

        return self.trace_api.list_traces(
            project_id=project_id,
            view=view,
            page_size=page_size,
            start_time=start_time,
            end_time=end_time,
            filter_=filter_,
            order_by=order_by,
            page_token=page_token)
|
NetASM/PyDatapath | pydatapath/utils/__init__.py | Python | gpl-3.0 | 5,607 | 0.000535 | __author__ = 'shahbaz'
# ###############################################################################
# Utility functions #
# ###############################################################################
import sys
from functools import wraps
from logging import StreamHandler
from bitstring import BitArray
def singleton(f):
    """
    Decorator that replaces the decorated zero-argument callable with the
    value it returns, i.e. ``f`` is invoked exactly once at definition time.

    :param f: zero-argument callable
    :return: whatever ``f()`` returns
    """
    return f()
def cached(f):
    """
    Memoizing decorator: remembers the result for each distinct tuple of
    positional arguments in ``wrapper.cache``.

    :param f: function to memoize (positional, hashable arguments only)
    :return: wrapped function, exposing its cache as the ``cache`` attribute
    """
    @wraps(f)
    def wrapper(*args):
        """
        Look the argument tuple up in the cache, computing it on a miss.

        :param args: positional arguments forwarded to the wrapped function
        :return: the (possibly cached) result of ``f(*args)``
        """
        if args not in wrapper.cache:
            wrapper.cache[args] = f(*args)
        return wrapper.cache[args]

    wrapper.cache = {}
    return wrapper
class frozendict(object):
    """An immutable, hashable dict-like mapping.

    "Mutating" operations (:meth:`update`, :meth:`remove`, :meth:`pop`)
    leave the instance untouched and return a new frozendict instead.
    The hash is computed lazily on first use and cached.
    """

    __slots__ = ["_dict", "_cached_hash"]

    def __init__(self, new_dict=None, **kwargs):
        """
        :param new_dict: optional mapping whose items seed this instance
        :param kwargs: extra key/value pairs (override new_dict on clash)
        """
        self._dict = dict()
        if new_dict is not None:
            self._dict.update(new_dict)
        self._dict.update(kwargs)

    def update(self, new_dict=None, **kwargs):
        """Return a new frozendict with the given items added/overridden.

        :param new_dict: optional mapping merged over a copy of this one
        :param kwargs: extra key/value pairs
        :return: new frozendict
        """
        d = self._dict.copy()
        if new_dict is not None:
            d.update(new_dict)
        d.update(kwargs)
        return self.__class__(d)

    def remove(self, ks):
        """Return a new frozendict without the keys in *ks*.

        :param ks: iterable of keys; keys not present are silently ignored
        :return: new frozendict
        """
        d = self._dict.copy()
        for k in ks:
            if k in d:
                del d[k]
        return self.__class__(d)

    def pop(self, *ks):
        """Remove *ks*, returning their values plus the reduced mapping.

        :param ks: keys to pop (each must be present, else KeyError)
        :return: list ``[self[k] for k in ks] + [frozendict without ks]``

        Bug fix: this used to call ``self.remove(*ks)`` although
        :meth:`remove` takes a single iterable argument, so popping more
        than one key raised TypeError (and a lone string key was iterated
        character by character).
        """
        result = []
        for k in ks:
            result.append(self[k])
        result.append(self.remove(ks))
        return result

    def __repr__(self):
        return repr(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __contains__(self, key):
        return key in self._dict

    def keys(self):
        return self._dict.keys()

    def values(self):
        return self._dict.values()

    def items(self):
        return self._dict.items()

    def iterkeys(self):
        # Python 2 only: plain dicts lost iterkeys() in Python 3.
        return self._dict.iterkeys()

    def itervalues(self):
        # Python 2 only.
        return self._dict.itervalues()

    def iteritems(self):
        # Python 2 only.
        return self._dict.iteritems()

    def get(self, key, default=None):
        """Return ``self[key]``, or *default* when the key is absent."""
        return self._dict.get(key, default)

    def __getitem__(self, item):
        return self._dict[item]

    def __hash__(self):
        # Computed lazily; AttributeError signals "not cached yet" because
        # _cached_hash is a slot that starts out unset.
        try:
            return self._cached_hash
        except AttributeError:
            h = self._cached_hash = hash(frozenset(self._dict.items()))
            return h

    def __eq__(self, other):
        # NOTE(review): assumes *other* is also a frozendict; comparing
        # against a plain dict raises AttributeError (preserved behaviour).
        return self._dict == other._dict

    def __ne__(self, other):
        return self._dict != other._dict

    def __len__(self):
        return len(self._dict)
def indent_str(s, indent=4):
    """
    Indent every line of *s* by *indent* spaces.

    :param s: possibly multi-line string
    :param indent: number of leading spaces to add to each line
    :return: the indented string
    """
    pad = indent * " "
    return "\n".join(pad + line for line in s.splitlines())
def repr_plus(ss, indent=4, sep="\n", prefix=""):
    """
    Join the reprs of *ss* with *sep*, prefixing each entry with *prefix*,
    and indent the whole result by *indent* spaces.

    :param ss: a single string or an iterable of objects
    :param indent: indentation passed through to :func:`indent_str`
    :param sep: separator placed between entries
    :param prefix: string prepended to each entry's repr
    :return: the formatted, indented string
    """
    if isinstance(ss, basestring):  # Python 2 name; a lone string is one item
        ss = [ss]
    joined = sep.join(prefix + repr(item) for item in ss)
    return indent_str(joined, indent)
class LockStreamHandler(StreamHandler):
    '''Relies on a multiprocessing.Lock to serialize multiprocess writes to a
    stream.'''

    def __init__(self, lock, stream=sys.stderr):
        """
        :param lock: lock (e.g. multiprocessing.Lock) guarding the stream
        :param stream: stream to emit records to (default: sys.stderr)
        """
        self.lock = lock
        # Bug fix: this used to call
        # super(MultiprocessStreamHandler, self).__init__(stream), but no
        # class of that name exists, so every construction raised NameError.
        super(LockStreamHandler, self).__init__(stream)

    def emit(self, record):
        """
        Acquire the lock before emitting the record.

        :param record: the logging.LogRecord to write
        """
        self.lock.acquire()
        try:
            super(LockStreamHandler, self).emit(record)
        finally:
            # Release even if emit raises, so the lock cannot be leaked.
            self.lock.release()
class QueueStreamHandler(StreamHandler):
    """
    Serializes multiprocess writes to a stream by handing each record to a
    queue instead of writing it directly; the queue's consumer performs the
    actual output.
    """

    def __init__(self, queue, stream=sys.stderr):
        """
        :param queue: queue object that receives the log records
        :param stream: stream forwarded to the StreamHandler base class
        """
        self.queue = queue
        super(QueueStreamHandler, self).__init__(stream)

    def emit(self, record):
        """
        Enqueue *record* rather than writing it to the stream.

        :param record: the logging.LogRecord to enqueue
        """
        self.queue.put(record)
def get_bitarray(packet, fields):
    """
    Pack the named header *fields* of *packet* into one contiguous BitArray.

    :param packet: mapping of field name -> {'length': bit width, 'value': bits}
    :param fields: iterable of field names giving the layout order
    :return: BitArray with the field values laid out back to back
    """
    result = BitArray()
    offset = 0
    for name in fields:
        width = packet[name]['length']
        result[offset:(offset + width)] = packet[name]['value']
        offset += width
    return result
orhaneee/machineLearning | MLE1.py | Python | gpl-3.0 | 7,728 | 0.024586 | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import os
print(os.getcwd())

# Part 1: Load the data and separate it into three arrays; one for each class
# (iris data: 4 feature columns + class label in column 4; 50 rows per class).
data = np.loadtxt("MLE1_iris.data")
data0 = data[0:50,:]
data1 = data[50:100,:]
data2 = data[100:150,:]
# Data array slicing done.

# Part 2: Plot each typr of data for all classes in 1D (with shifts of 0.1 for better visualization)
fig = plt.figure()
plt.plot(data0[:,0], np.ones(len(data0[:,0]))*0.0, '+r', label='Data 0 Class 0')
plt.plot(data1[:,0], np.ones(len(data1[:,0]))*0.1, '+g', label='Data 0 Class 1')
plt.plot(data2[:,0], np.ones(len(data2[:,0]))*0.2, '+b', label='Data 0 Class 2')
plt.plot(data0[:,1], np.ones(len(data0[:,1]))*1.0, 'xr', label='Data 1 Class 0')
plt.plot(data1[:,1], np.ones(len(data1[:,1]))*1.1, 'xg', label='Data 1 Class 1')
plt.plot(data2[:,1], np.ones(len(data2[:,1]))*1.2, 'xb', label='Data 1 Class 2')
plt.plot(data0[:,2], np.ones(len(data0[:,2]))*2.0, '.r', label='Data 2 Class 0')
plt.plot(data1[:,2], np.ones(len(data1[:,2]))*2.1, '.g', label='Data 2 Class 1')
plt.plot(data2[:,2], np.ones(len(data2[:,2]))*2.2, '.b', label='Data 2 Class 2')
plt.plot(data0[:,3], np.ones(len(data0[:,3]))*3.0, '1r', label='Data 3 Class 0')
plt.plot(data1[:,3], np.ones(len(data1[:,3]))*3.1, '1g', label='Data 3 Class 1')
plt.plot(data2[:,3], np.ones(len(data2[:,3]))*3.2, '1b', label='Data 3 Class 2')
plt.legend(fontsize=9, loc=3)

# Part 3: Examining the plots above select two of the data types and plot them in 2D - one data type for each axis. Let's say you chose ath and bth columns as your data. This means you have to plot dataN[:,a] vs dataN[:,b] for N=0,1,2.
# Columns 2 and 3 (petal length/width) are used below.
second_fig = plt.figure() # New figure instance.
plt.plot(data0[:,2], data0[:,3], '+r', label='Data 0')
plt.plot(data1[:,2], data1[:,3], '.g', label='Data 1')
plt.plot(data2[:,2], data2[:,3], '1b', label='Data 2')
plt.legend(fontsize=9, loc=2)

# Part 4: Using the two datatype you have chosen, extract the 2D Gaussian (Normal) distribution parameters. Numpy functions are called here to be used ONLY for validation of your results.
mx0 = np.mean(data0[:,2])
my0 = np.mean(data0[:,3])
cov0 = np.cov(data0[:,2:4].T)
mx1 = np.mean(data1[:,2])
my1 = np.mean(data1[:,3])
cov1 = np.cov(data1[:,2:4].T)
mx2 = np.mean(data2[:,2])
my2 = np.mean(data2[:,3])
cov2 = np.cov(data2[:,2:4].T)
def mean_find(data):
    """Arithmetic mean of *data*; returns 0.0 for an empty sequence."""
    return float(sum(data)) / (len(data) or 1)
# Sanity check: recompute the class-0 means with the hand-written helper
# (to be compared against the numpy results above).
mx0_self = mean_find(data0[:,2])
my0_self = mean_find(data0[:,3])
def cov_finder(x, y):
    """
    Population covariance of 1-D arrays x and y via the formula
    E((x - mu(x)) * (y - mu(y))).

    Returns False (legacy behaviour, preserved) when x is not
    one-dimensional.

    Bug fix: the dimensionality check used ``x.ndim is not 1``, which
    compares object identity rather than value; it now uses ``!=``.
    """
    if x.ndim != 1:
        return False
    mean_x = float(sum(x)) / max(len(x), 1)
    mean_y = float(sum(y)) / max(len(y), 1)
    products = [(xi - mean_x) * (yi - mean_y) for xi, yi in zip(x, y)]
    return sum(products) / len(products)
# Sanity check: rebuild each 2x2 covariance matrix entry-by-entry with the
# hand-written cov_finder (population covariance; np.cov above uses the
# sample estimator, so small differences are expected).
cov0_self = np.array([[ cov_finder(data0[:,2], data0[:,2]), cov_finder(data0[:,2], data0[:,3]) ],
                      [ cov_finder(data0[:,3], data0[:,2]), cov_finder(data0[:,3], data0[:,3]) ]])
mx1_self = mean_find(data1[:,2])
my1_self = mean_find(data1[:,3])
cov1_self = np.array([[ cov_finder(data1[:,2], data1[:,2]), cov_finder(data1[:,2], data1[:,3]) ],
                      [ cov_finder(data1[:,3], data1[:,2]), cov_finder(data1[:,3], data1[:,3]) ]])
mx2_self = mean_find(data2[:,2])
my2_self = mean_find(data2[:,3])
cov2_self = np.array([[ cov_finder(data2[:,2], data2[:,2]), cov_finder(data2[:,2], data2[:,3]) ],
                      [ cov_finder(data2[:,3], data2[:,2]), cov_finder(data2[:,3], data2[:,3]) ]])

# Part 5: Plot the Gaussian surfaces for each class.
## First, we generate the grid to compute the Gaussian function on
## (500x500 points spanning the full data range).
vals = np.linspace(np.min(data),np.max(data) , 500)
x,y = np.meshgrid(vals, vals)
## Next, we define and implement the 2D Gaussian function.
def gaussian_2d(x, y, mx, my, cov):
    ''' x and y are the 2D coordinates to calculate the function value
        (scalars or numpy arrays, broadcast together)
        mx and my are the mean parameters in x and y axes
        cov is the 2x2 variance-covariance matrix (indexable as cov[i][j])

        Bug fix: the cross term of the exponent used
        "/deviation_x*deviation_y", which divides by deviation_x and then
        MULTIPLIES by deviation_y; the correct denominator is the product
        deviation_x*deviation_y.'''
    deviation_x = np.sqrt(cov[0][0])
    deviation_y = np.sqrt(cov[1][1])
    rho = cov[0][1] / (deviation_x * deviation_y)
    one_minus_rho2 = 1 - np.square(rho)
    # Normalisation constant of the bivariate normal density.
    a = 1 / (2 * np.pi * deviation_x * deviation_y * np.sqrt(one_minus_rho2))
    # Quadratic form in the exponent.
    z = (np.square(x - mx) / cov[0][0]
         + np.square(y - my) / cov[1][1]
         - 2 * rho * (x - mx) * (y - my) / (deviation_x * deviation_y))
    return a * np.exp(-z / (2 * one_minus_rho2))
## Finally, we compute the Gaussian function outputs for each entry in our mesh and plot the surface for each class.
z0 = gaussian_2d(x, y, mx0, my0, cov0)
z1 = gaussian_2d(x, y, mx1, my1, cov1)
z2 = gaussian_2d(x, y, mx2, my2, cov2)

fig0 = plt.figure()
ax0 = fig0.add_subplot(111, projection='3d')
ax0.plot_surface(x, y, z0, cmap=cm.jet, linewidth=0, antialiased=False)

fig1 = plt.figure()
ax1 = fig1.add_subplot(111, projection='3d')
ax1.plot_surface(x, y, z1, cmap=cm.jet, linewidth=0, antialiased=False)

fig2 = plt.figure()
ax2 = fig2.add_subplot(111, projection='3d')
ax2.plot_surface(x, y, z2, cmap=cm.jet, linewidth=0, antialiased=False)

plt.show()

# Part 6: Classify each sample in the dataset based on your findings and assign a class label. Explain your reasoning behind your implementation with few sentences
# Maximum-likelihood classification: each sample gets the label of the
# class whose Gaussian density is highest at that point (equal priors).
lbl = []
for d in data:
    prop_first = gaussian_2d(d[2], d[3], mx0, my0, cov0)
    prop_sec = gaussian_2d(d[2], d[3], mx1, my1, cov1)
    prop_third = gaussian_2d(d[2], d[3], mx2, my2, cov2)
    if (prop_first > prop_sec) and (prop_first > prop_third):
        label = 0
    elif (prop_sec > prop_first) and (prop_sec > prop_third):
        label = 1
    else:
        label = 2
    lbl.append(label)

# Part 7: Calculate the success rate - the percentage of correctly classified samples
success = 0
for i in range(len(data)):
    if lbl[i] == data[i,4]:
        success += 1
# NOTE(review): under Python 2 (this file uses print statements),
# success*100/150 is integer division and truncates before float() runs.
success_rate = float(success*100/150)
print 'Success rate is %4.2f %%' %success_rate

# Part 8: Repeat the same process for non-overlapping training and test sets.
data_test = np.vstack((data[0:25],data[50:75],data[100:125]))
data_train = np.vstack((data[25:50],data[75:100],data[125:150]))
data_test0 = data_test[data_test[:,4]==0]
data_test1 = data_test[data_test[:,4]==1]
data_test2 = data_test[data_test[:,4]==2]
data_train0 = data_train[data_train[:,4]==0]
data_train1 = data_train[data_train[:,4]==1]
data_train2 = data_train[data_train[:,4]==2]

''' Second part of
homework.

Calculations :
@mean,
@cov'''
mx0_train = np.mean(data_train0[:,2])
my0_train = np.mean(data_train0[:,3])
mx1_train = np.mean(data_train1[:,2])
my1_train = np.mean(data_train1[:,3])
mx2_train = np.mean(data_train2[:,2])
my2_train = np.mean(data_train2[:,3])
cov0_train = np.cov(data_train0[:,2:4].T)
cov1_train = np.cov(data_train1[:,2:4].T)
cov2_train = np.cov(data_train2[:,2:4].T)

# NOTE(review): the loop below classifies data_train, i.e. it evaluates on
# the TRAINING set; the data_test* arrays defined above are never used —
# presumably the held-out test set was meant here. Verify against the
# assignment description.
lbl_train = []
for each_data in data_train:
    prop_t1 = gaussian_2d(each_data[2], each_data[3], mx0_train, my0_train, cov0_train)
    prop_t2 = gaussian_2d(each_data[2], each_data[3], mx1_train, my1_train, cov1_train)
    prop_t3 = gaussian_2d(each_data[2], each_data[3], mx2_train, my2_train, cov2_train)
    if (prop_t1 > prop_t2) and (prop_t1 > prop_t3):
        label_train = 0
    elif (prop_t2 > prop_t1) and (prop_t2 > prop_t3):
        label_train = 1
    else:
        label_train = 2
    lbl_train.append(label_train)

success_train = 0
for i in range(len(data_train)):
    if lbl_train[i] == data_train[i,4]:
        success_train += 1
success_rt = float(success_train*100/75)
print 'Success rate is %4.2f %%' %success_rt
lssfau/walberla | tests/lbm/codegen/InplaceStreamingCodegen.py | Python | gpl-3.0 | 4,302 | 0.006044 | from dataclasses import replace
from lbmpy_walberla import generate_alternating_lbm_sweep, generate_boundary, generate_alternating_lbm_boundary
from pystencils_walberla import CodeGeneration, generate_sweep, generate_info_header
from pystencils import Target, CreateKernelConfig
from lbmpy import LBMConfig, LBMOptimisation, LBStencil, Method, Stencil
from lbmpy.creationfunctions import create_lb_collision_rule, create_lb_ast
from lbmpy.macroscopic_value_kernels import macroscopic_values_setter
from lbmpy.boundaries import NoSlip, UBB, ExtrapolationOutflow
from lbmpy.advanced_streaming import Timestep
from pystencils import Field
# Common Setup
# D3Q27 lattice, CPU target; 'aa' is the in-place streaming pattern under
# test, 'pull' the conventional two-field reference pattern.
stencil = LBStencil(Stencil.D3Q27)
target = Target.CPU
inplace_pattern = 'aa'
two_fields_pattern = 'pull'
namespace = 'lbmpy'

# Symbolic PDF fields (source + temporary for the two-field pattern) and
# the velocity output field.
f_field = Field.create_generic('f', stencil.D, index_shape=(stencil.Q,), layout='fzyx')
f_field_tmp = Field.create_generic('f_tmp', stencil.D, index_shape=(stencil.Q,), layout='fzyx')
u_field = Field.create_generic('u', stencil.D, index_shape=(stencil.D,), layout='fzyx')

output = {
    'velocity': u_field
}

lbm_config = LBMConfig(stencil=stencil, method=Method.SRT, relaxation_rate=1.5, output=output)
lbm_opt = LBMOptimisation(symbolic_field=f_field,
                          symbolic_temporary_field=f_field_tmp)
config = CreateKernelConfig(target=target)

# One collision rule shared by both streaming patterns.
collision_rule = create_lb_collision_rule(lbm_config=lbm_config, lbm_optimisation=lbm_opt, config=config)
lb_method = collision_rule.method

# Boundary conditions: no-slip walls, a constant-velocity inflow (UBB) and
# an extrapolation outflow in +x, set up once per streaming pattern.
noslip = NoSlip()
ubb = UBB((0.05,) + (0,) * (stencil.D - 1))
outflow_normal = (1,) + (0,) * (stencil.D - 1)
outflow_pull = ExtrapolationOutflow(outflow_normal, lb_method, streaming_pattern=two_fields_pattern)
outflow_inplace = ExtrapolationOutflow(outflow_normal, lb_method, streaming_pattern=inplace_pattern)

init_velocity = (0,) * stencil.D

# Macroscopic initialisation kernels (density 1, zero velocity).
init_kernel_pull = macroscopic_values_setter(lb_method, 1, init_velocity, f_field, streaming_pattern=two_fields_pattern)
init_kernel_inplace = macroscopic_values_setter(
    lb_method, 1, init_velocity, f_field, streaming_pattern=inplace_pattern, previous_timestep=Timestep.ODD)

stencil_typedefs = {'Stencil_T': stencil}
field_typedefs = {'PdfField_T': f_field, 'VelocityField_T': u_field}

with CodeGeneration() as ctx:
    # Pull-Pattern classes
    ast_pull = create_lb_ast(collision_rule=collision_rule,
                             streaming_pattern=two_fields_pattern, lbm_optimisation=lbm_opt)
    generate_sweep(ctx, 'PullSweep', ast_pull, field_swaps=[(f_field, f_field_tmp)], namespace=namespace)
    generate_boundary(ctx, 'PullNoSlip', noslip, lb_method,
                      streaming_pattern=two_fields_pattern, target=target, namespace=namespace)
    generate_boundary(ctx, 'PullUBB', ubb, lb_method, streaming_pattern=two_fields_pattern,
                      target=target, namespace=namespace)
    generate_boundary(ctx, 'PullOutflow', outflow_pull, lb_method,
                      streaming_pattern=two_fields_pattern, target=target, namespace=namespace)
    generate_sweep(ctx, 'PullInit', init_kernel_pull, target=target, namespace=namespace)

    # Inplace Pattern classes (alternating even/odd timestep kernels).
    inplace_lbm_config = replace(lbm_config, streaming_pattern=inplace_pattern)
    generate_alternating_lbm_sweep(ctx, 'InPlaceSweep', collision_rule,
                                   lbm_config=inplace_lbm_config, namespace=namespace)
    generate_alternating_lbm_boundary(ctx, 'InPlaceNoSlip', noslip, lb_method, streaming_pattern=inplace_pattern,
                                      after_collision=True, target=target, namespace=namespace)
    generate_alternating_lbm_boundary(ctx, 'InPlaceUBB', ubb, lb_method, streaming_pattern=inplace_pattern,
                                      after_collision=True, target=target, namespace=namespace)
    generate_alternating_lbm_boundary(ctx, 'InPlaceOutflow', outflow_inplace, lb_method,
                                      streaming_pattern=inplace_pattern,
                                      after_collision=True, target=target, namespace=namespace)
    generate_sweep(ctx, 'InPlaceInit', init_kernel_inplace, target=target, namespace=namespace)

    # Shared header with the stencil/field typedefs used by the tests.
    generate_info_header(ctx, "InplaceStreamingCodegen.h",
                         stencil_typedefs=stencil_typedefs, field_typedefs=field_typedefs)
|
chouseknecht/ansible | lib/ansible/modules/cloud/google/gcp_container_node_pool.py | Python | gpl-3.0 | 38,588 | 0.003706 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_container_node_pool
description:
- NodePool contains the name and configuration for a cluster's node pool.
- Node pools are a set of nodes (i.e. VM's), with a common configuration and specification,
under the control of the cluster master. They may have a set of Kubernetes labels
applied to them, which may be used to reference them during pod scheduling. They
may also be resized up or down, to accommodate the workload.
short_description: Creates a GCP NodePool
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: | str
name:
description:
- The name of the node pool.
required: false
type: str
config:
description:
- The node configuration of the pool.
required: false
type: dict
suboptions:
machine_type:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
required: f | alse
type: str
disk_size_gb:
description:
- Size of the disk attached to each node, specified in GB. The smallest allowed
disk size is 10GB. If unspecified, the default disk size is 100GB.
required: false
type: int
oauth_scopes:
description:
- The set of Google API scopes to be made available on all of the node VMs
under the "default" service account.
- 'The following scopes are recommended, but not required, and by default
are not included: U(https://www.googleapis.com/auth/compute) is required
for mounting persistent storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for
communicating with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring
are enabled, in which case their required scopes will be added.
required: false
type: list
service_account:
description:
- The Google Cloud Platform Service Account to be used by the node VMs. If
no Service Account is specified, the "default" service account is used.
required: false
type: str
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the four reserved keys: "instance-template",
"kube-env", "startup-script", and "user-data" Values are free-form strings,
and only have meaning as interpreted by the image running in the instance.
The only restriction placed on them is that each value''s size must be less
than or equal to 32 KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
required: false
type: dict
image_type:
description:
- The image type to use for this node. Note that for a given image type, the
latest version of it will be used.
required: false
type: str
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each node.
These will added in addition to any default label(s) that Kubernetes may
apply to the node. In case of conflict in label keys, the applied set may
differ depending on the Kubernetes version -- it''s best to assume the behavior
is undefined and conflicts should be avoided. For more information, including
usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html)
An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
required: false
type: dict
local_ssd_count:
description:
- The number of local SSD disks to be attached to the node.
- 'The limit for this value is dependant upon the maximum number of disks
available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits)
for more information.'
required: false
type: int
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls and are specified by the
client during cluster or node pool creation. Each tag within the list must
comply with RFC1035.
required: false
type: list
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
for more information about preemptible VM instances.'
required: false
type: bool
accelerators:
description:
- A list of hardware accelerators to be attached to each node.
required: false
type: list
version_added: 2.9
suboptions:
accelerator_count:
description:
- The number of the accelerator cards exposed to an instance.
required: false
type: int
accelerator_type:
description:
- The accelerator type resource name.
required: false
type: str
disk_type:
description:
- Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd')
If unspecified, the default disk type is 'pd-standard' .
required: false
type: str
version_added: 2.9
min_cpu_platform:
description:
- Minimum CPU platform to be used by this instance. The instance may be scheduled
on the specified or newer CPU platform .
required: false
type: str
version_added: 2.9
taints:
description:
- List of kubernetes taints to be applied to each node.
required: false
type: list
version_added: 2.9
suboptions:
key:
description:
- Key for taint.
required: false
type: str
value:
description:
- Value for taint.
required: false
type: str
effect:
description:
- Effect for taint.
required: false
|
Menollo/menosic | client/main.py | Python | gpl-3.0 | 2,756 | 0 | import json
import random
import ssl
import string
import threading
import time
import websocket
import settings
from player import Player
class WebsocketPlayerControl(object):
    """Bridge between a local Player and the remote websocket server.

    On connect the client registers itself under a random id; afterwards
    it forwards song changes to the server and dispatches server-sent
    actions (play, pause, ...) to the wrapped player.
    """

    def __init__(self, player, server=settings.WS_SERVER):
        websocket.enableTrace(settings.DEBUG)
        # Random 10-character id so the server can tell clients apart.
        alphabet = string.ascii_uppercase + string.digits
        self.player_id = ''.join(random.choice(alphabet) for _ in range(10))
        self.player = player
        self.ws = websocket.WebSocketApp(
            server,
            on_open=self.on_open,
            on_message=self.on_message,
            on_error=self.on_error)
        # Have the player report track changes back through us.
        player.song_change_callback = self.song_change

    def song_change(self, identifier):
        """Tell the server which track is now playing."""
        payload = {
            'action': 'song_change',
            'player': self.player_id,
            'key': settings.CLIENT_TOKEN,
            'playlist': settings.PLAYLIST_ID,
            'identifier': identifier
        }
        self.ws.send(json.dumps(payload))

    def start(self):
        """Keep the websocket connection alive, reconnecting after drops."""
        while True:
            if settings.DEBUG:
                print('opening websocket connection...')
            # NOTE(review): certificate verification is disabled here —
            # presumably intentional for self-signed servers; confirm.
            self.ws.run_forever(ping_interval=60,
                                sslopt={"cert_reqs": ssl.CERT_NONE})
            time.sleep(10)

    def quit(self):
        """Announce the disconnect and tear down the socket."""
        self.ws.send("client disconnect")
        self.ws.close()

    def on_open(self, ws):
        """Register this client with the server once connected."""
        try:
            name = settings.CLIENT_NAME
        except AttributeError:
            name = 'Client'
        payload = {
            'action': 'register',
            'player': self.player_id,
            'key': settings.CLIENT_TOKEN,
            'playlist': settings.PLAYLIST_ID,
            'name': name
        }
        ws.send(json.dumps(payload))

    def on_message(self, ws, message):
        """Dispatch a server action to the wrapped player."""
        if settings.DEBUG:
            print('message received:', message)
        data = json.loads(message)
        action = data['action']
        if action == 'play':
            self.player.play()
        elif action == 'pause':
            self.player.pause()
        elif action == 'update_playlist':
            self.player.update_playlist()
        elif action == 'next':
            self.player.next()
        elif action == 'play_song':
            self.player.play_song(data['identifier'])

    def on_error(self, ws, error):
        print(error)
def main():
    """Wire a Player to the websocket control channel and run both."""
    player = Player()
    control = WebsocketPlayerControl(player)
    listener = threading.Thread(name='ws', target=control.start)
    try:
        listener.start()
        player.start()
    except KeyboardInterrupt:
        # Shut down the player first, then the control channel.
        player.quit()
        control.quit()
        listener.join()


if __name__ == "__main__":
    main()
|
rocky/python2-trepan | trepan/inout/base.py | Python | gpl-3.0 | 5,570 | 0.000359 | # -*- coding: utf-8 -*-
# Copyright (C) 2009, 2014-2015 Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""classes to support communication to and from the debugger. This
communcation might be to/from another process or another computer.
And reading may be from a debugger command script.
For example, we'd like to support Sockets, and serial lines and file
reading, as well a readline-type input. Encryption and Authentication
methods might decorate some of the communication channels.
Some ideas originiated as part of Matt Fleming's 2006 Google Summer of
Code project.
"""
NotImplementedMessage = "This method must be overriden in a subclass"
# FIXME: In 2.6 we can really use an Absctract Class (ABC). But for now,
# we want 2.5.x compatibility.
class DebuggerInputBase(object):
    """Abstract base class for debugger input channels.

    Subclasses must override open() and readline(); close() and
    use_history() provide usable default behavior.
    """

    def __init__(self, inp=None, opts=None):
        # No underlying stream until a subclass's open() supplies one.
        self.input = None
        self.closed = None

    def close(self):
        """Mark the channel closed and close any underlying stream."""
        self.closed = True
        if self.input:
            self.input.close()

    def use_history(self):
        """Command history is off unless a subclass opts in."""
        return False

    def open(self, inp, opts=None):
        """Set where to read from.  Must be overridden."""
        raise NotImplementedError(NotImplementedMessage)

    def readline(self, use_raw=None):
        """Read one line of input; raises EOFError at end of input.

        No prompting is done here — arrange to write the prompt via the
        output channel first.  If `use_raw' is set, raw_input() is used
        where the concrete input supports it; when left None the value
        from class initialization applies.  Must be overridden.
        """
        raise NotImplementedError(NotImplementedMessage)
# FIXME: In 2.6 we can really use an Abstract Class (ABC). But for now,
# we want 2.5.x compatibility.
class DebuggerOutputBase(object):
    """Abstract base class for debugger output channels.

    Subclasses must override flush() and write(); close() and
    writeline() provide default behavior on top of them.
    """

    def __init__(self, out=None, opts=None):
        # No underlying stream until a subclass provides one.
        self.output = None

    def close(self):
        """Close the underlying stream, if any."""
        if self.output:
            self.output.close()

    def flush(self):
        raise NotImplementedError(NotImplementedMessage)

    def write(self, output):
        """Write `output' to the channel; raises IOError on error.
        Must be overridden."""
        raise NotImplementedError(NotImplementedMessage)

    def writeline(self, msg):
        """Write `msg' followed by a newline via write()."""
        self.write("%s\n" % msg)
class DebuggerInOutBase(object):
    """Abstract base class for debugger I/O handled by one channel
    (e.g. a socket or tty).

    Subclasses must override flush(), open(), readline() and write();
    close() and writeline() provide default behavior on top of them.
    """

    def __init__(self, inout=None, opts=None):
        # No underlying channel until a subclass provides one.
        self.inout = None

    def close(self):
        """Close the underlying channel, if any."""
        if self.inout:
            self.inout.close()

    def flush(self):
        raise NotImplementedError(NotImplementedMessage)

    def open(self, inp, opts=None):
        """Set where to read from.  Must be overridden."""
        raise NotImplementedError(NotImplementedMessage)

    def readline(self, use_raw=None):
        """Read one line of input; raises EOFError at end of input.

        No prompting is done here — arrange to write the prompt first.
        If `use_raw' is set, raw_input() is used where supported; when
        left None the value from class initialization applies.  Must be
        overridden.
        """
        raise NotImplementedError(NotImplementedMessage)

    def write(self, output):
        """Write `output' to the channel; raises IOError on error.
        Must be overridden."""
        raise NotImplementedError(NotImplementedMessage)

    def writeline(self, msg):
        """Write `msg' followed by a newline via write()."""
        self.write("%s\n" % msg)
# Demo: minimal subclasses exercising the base classes above.
# NOTE: the Python 2 print statements below make this demo py2-only,
# matching the repository (python2-trepan).
if __name__=='__main__':
    class MyInput(DebuggerInputBase):
        # Override open() so it no longer raises NotImplementedError.
        def open(self, inp, opts=None):
            print("open(%s) called" % inp)
            pass
        pass
    class MyOutput(DebuggerOutputBase):
        # Override writeline() directly; write() is deliberately left
        # unimplemented to demonstrate the error path below.
        def writeline(self, s):
            print "writeline:", s
            pass
        pass
    inp = MyInput()
    inp.open('foo')
    inp.close()
    out = MyOutput()
    out.writeline('foo')
    # write() was not overridden, so the base class raises
    # NotImplementedError, which the demo catches and reports.
    try:
        out.write('foo')
    except NotImplementedError:
        print 'Ooops. Forgot to implement write()'
        pass
|
dhamaniasad/urbandefinition | setup.py | Python | unlicense | 568 | 0.001761 | from setuptools im | port setup
# Third-party packages the command-line tool needs at runtime.
REQUIRES = ['requests', 'beautifulsoup4']

setup(
    name='urbandefinition',
    version=0.3,
    description='Get Urban Dictionary definitions from the terminal',
    author='Asad Dhamani',
    author_email='dhamaniasad+code@gmail.com',
    url='https://github.com/dhamaniasad/urbandefinition',
    license='Unlicense',
    py_modules=['urbandefinition'],
    install_requires=REQUIRES,
    # Expose `urban` as a console command.
    entry_points={
        'console_scripts': [
            'urban = urbandefinition:command_line_runner'
        ]
    }
)
OpenSmalltalk/vm | processors/ARM/gdb-8.3.1/gdb/python/lib/gdb/command/frame_filters.py | Python | mit | 16,256 | 0.002461 | # Frame-filter commands.
# Copyright (C) 2013-2019 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with frame-filters."""
import sys
import gdb
import copy
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import gdb.frames
import itertools
# GDB Commands.
class SetFilterPrefixCmd(gdb.Command):
    """Prefix command for 'set' frame-filter related operations."""
    def __init__(self):
        # Register "set frame-filter" as a prefix command (final True
        # argument) so sub-commands can hang off it; the prefix itself
        # takes no arguments, hence COMPLETE_NONE.
        super(SetFilterPrefixCmd, self).__init__("set frame-filter",
                                                 gdb.COMMAND_OBSCURE,
                                                 gdb.COMPLETE_NONE, True)
class ShowFilterPrefixCmd(gdb.Command):
    """Prefix command for 'show' frame-filter related operations."""
    def __init__(self):
        # Register "show frame-filter" as a prefix command (final True
        # argument) so sub-commands can hang off it; the prefix itself
        # takes no arguments, hence COMPLETE_NONE.
        super(ShowFilterPrefixCmd, self).__init__("show frame-filter",
                                                  gdb.COMMAND_OBSCURE,
                                                  gdb.COMPLETE_NONE, True)
class InfoFrameFilter(gdb.Command):
    """List all registered Python frame-filters.
    Usage: info frame-filters"""
    def __init__(self):
        super(InfoFrameFilter, self).__init__("info frame-filter",
                                              gdb.COMMAND_DATA)
    @staticmethod
    def enabled_string(state):
        """Return "Yes" if filter is enabled, otherwise "No"."""
        if state:
            return "Yes"
        else:
            return "No"
    def print_list(self, title, frame_filters, blank_line):
        """Print one frame-filter dictionary as a table.

        Returns 1 if any filters were printed, 0 if the dictionary was
        empty; invoke() sums these to detect "no filters at all".
        """
        # Highest-priority filters are listed first.
        sorted_frame_filters = sorted(frame_filters.items(),
                                      key=lambda i: gdb.frames.get_priority(i[1]),
                                      reverse=True)
        if len(sorted_frame_filters) == 0:
            return 0
        print(title)
        print(" Priority Enabled Name")
        for frame_filter in sorted_frame_filters:
            name = frame_filter[0]
            try:
                # Left-pad the columns to fixed widths for alignment.
                priority = '{:<8}'.format(
                    str(gdb.frames.get_priority(frame_filter[1])))
                enabled = '{:<7}'.format(
                    self.enabled_string(gdb.frames.get_enabled(frame_filter[1])))
                print(" %s %s %s" % (priority, enabled, name))
            except Exception:
                # A broken filter must not abort the listing; report it
                # on its own row and keep going.
                e = sys.exc_info()[1]
                print(" Error printing filter '"+name+"': "+str(e))
        if blank_line:
            print("")
        return 1
    def invoke(self, arg, from_tty):
        """Implement "info frame-filter": list every known dictionary."""
        any_printed = self.print_list("global frame-filters:", gdb.frame_filters, True)
        cp = gdb.current_progspace()
        any_printed += self.print_list("progspace %s frame-filters:" % cp.filename,
                                       cp.frame_filters, True)
        for objfile in gdb.objfiles():
            any_printed += self.print_list("objfile %s frame-filters:" % objfile.filename,
                                           objfile.frame_filters, False)
        if any_printed == 0:
            print ("No frame filters.")
# Internal enable/disable functions.
def _enable_parse_arg(cmd_name, arg):
    """Internal worker: split an enable/disable argument string.

    Arguments:
        cmd_name: Name of the command invoking this function.
        arg: The argument as a string.

    Returns:
        A list containing the dictionary name and the filter name, or
        just the dictionary name in the case of "all".

    Raises:
        gdb.GdbError: If the argument count is wrong for the form used.
    """
    argv = gdb.string_to_argv(arg)
    if not argv:
        raise gdb.GdbError(cmd_name + " requires an argument")
    if argv[0] == "all":
        # "all" stands alone; naming a filter alongside it is an error.
        if len(argv) > 1:
            raise gdb.GdbError(cmd_name + ": with 'all' "
                               "you may not specify a filter.")
    elif len(argv) != 2:
        raise gdb.GdbError(cmd_name + " takes exactly two arguments.")
    return argv
def _do_enable_frame_filter(command_tuple, flag):
    """Internal worker: enable or disable frame filters.

    Arguments:
        command_tuple: A tuple whose first element is the frame filter
            dictionary name and whose second is the filter name; the
            second element is ignored when the first is "all".
        flag: True for Enable, False for Disable.

    Raises:
        gdb.GdbError: If the named filter cannot be found.
    """
    dict_name = command_tuple[0]
    filters = gdb.frames.return_list(dict_name)
    if dict_name == "all":
        # For "all", return_list yields every registered filter.
        for frame_filter in filters:
            gdb.frames.set_enabled(frame_filter, flag)
        return
    name = command_tuple[1]
    try:
        target = filters[name]
    except KeyError:
        raise gdb.GdbError("frame-filter '" + str(name) + "' not found.")
    gdb.frames.set_enabled(target, flag)
def _complete_frame_filter_list(text, word, all_flag):
    """Worker for frame filter dictionary name completion.

    Arguments:
        text: The full text of the command line.
        word: The most recent word of the command line.
        all_flag: Whether to include the word "all" in completion.

    Returns:
        A list of suggested frame filter dictionary name completions
        from text/word analysis.  This list can be empty when there
        are no suggestions for completion.
    """
    if all_flag:
        filter_locations = ["all", "global", "progspace"]
    else:
        filter_locations = ["global", "progspace"]
    for objfile in gdb.objfiles():
        filter_locations.append(objfile.filename)

    # If the user just asked for completions with no completion
    # hints, just return all the frame filter dictionaries we know
    # about.
    if text == "":
        return filter_locations

    # Otherwise filter on what we know.  Materialize a real list: on
    # Python 3 filter() returns an iterator, which would make the
    # len() check and item assignment below raise TypeError.
    flist = [loc for loc in filter_locations if loc.startswith(text)]

    # If we only have one completion, trim the already-typed prefix so
    # only the remainder of the word is offered.
    if len(flist) == 1:
        flist[0] = flist[0][len(text)-len(word):]

    # Otherwise, return an empty list, or the list of matching frame
    # filter dictionary names.
    return flist
def _complete_frame_filter_name(word, printer_dict):
"""Worker for frame filter name completion.
Arguments:
word: The most recent word of the command line.
printer_dict: The frame filter dictionary to search for frame
filter name completions.
Returns: A list of suggested frame filter name completions
from word analysis of the frame filter dictionary. This list
can be empty when there are no suggestions for completion.
"""
printer_keys = printer_dict.keys()
if (word == ""):
return printer_keys
flist = filter(lambda x,y=word:x.startswith(y), printer_keys)
return flist
class EnableFrameFilter(gdb.Command):
"""GDB command to enable the specified frame-filter.
Usage: enable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of an "objfile" -- a shared library or an executable.
NAME matches the name of the frame-filter to operate on."""
def __init__(self):
super(EnableFrameFilter, self).__init__("enable frame-filter",
gdb.COMMAND_DATA)
def complet |
flyingoctopus/three.js | utils/exporters/cinema4d/export_to_three.js.py | Python | mit | 8,574 | 0.013296 | '''
author : "George Profenza"
url : ("disturb", "disturbmedia.com/blog","My blog, http://tomaterial.blogspot.com")
Export meshes the three.js 3D Engine by mr.doob's et al.
More details on the engine here:
https://github.com/mrdoob/three.js
Currently supports UVs. If the model doesn't display correctly
you might need to reverse some normals/do some cleanup.
Also, if you use Selection Tags and basic ColorMaterials,
the colours will be picked up as face colors. Call autoColor() on the
model you use for this.
The mesh transformations(position, rotation, scale) are saved
and you can get them using: getPosition(), getRotation() and getScale()
each returning a THREE.Vector3
In short
var myGeom = new myC4DGeom();
var myModel = new THREE.Mesh( myGeom, new THREE.MeshFaceMaterial());
//set transforms
model.position = myGeom.getPosition()
model.rotation = myGeom.getRotation()
model.scale = myGeom.getScale()
//set selection tags colours
myGeom.autoColor()
More details on this exporter and more js examples here:
https://github.com/orgicus/three.js
Have fun!
This script requires Cinema 4D R11.5 minimum and the Py4D Plugin:
http://www.py4d.com/get-py4d/
'''
import c4d
from c4d import documents, UVWTag, storage, plugins, gui, modules, bitmaps, utils
from c4d.utils import *
''' from c4d import symbols as sy, plugins, utils, bitmaps, gui '''
import math
import re
# utils
clean = lambda varStr: re.sub('\W|^(?=\d)','_', varStr)
# from Active State's Python recipies: http://code.activestate.com/recipes/266466-html-colors-tofrom-rgb-tuples/
def RGBToHTMLColor(rgb_tuple):
return '0x%02x%02x%02x' % rgb_tuple
def Export():
if not op: return
if op.GetType() != 5100:
print 'Selected Object is not an editable mesh'
return
unit = 0.001#for scale
fps = doc.GetFps()
bd = doc.GetRenderBaseDraw()
scr = bd.GetFrameScreen()
rd = doc.GetActiveRenderData()
name = op.GetName()
classname = clean(name)
c4dPath = c4d.storage.GeGetC4DPath(c4d.C4D_PATH_LIBRARY)
jsFile = open(c4dPath+'/scripts/Three.js','r')
js = jsFile.read()
htmlFile = open(c4dPath+'/scripts/template.html','r')
html = htmlFile.read()
html = html.replace('%s',classname)
code = 'var %s = function () {\n\n\tvar scope = this;\n\n\tTHREE.Geometry.call(this);\n\n' % classname
def GetMesh(code):
# goto 0
doc.SetTime(c4d.BaseTime(0, fps))
c4d.DrawViews( c4d.DA_ONLY_ACTIVE_VIEW|c4d.DA_NO_THREAD|c4d.DA_NO_REDUCTION|c4d.DA_STATICBREAK )
c4d.GeSyncMessage(c4d.EVMSG_TIMECHANGED)
doc.SetTime(doc.GetTime())
c4d.EventAdd(c4d.EVENT_ANIMATE)
SendModelingCommand(command = c4d.MCOMMAND_REVERSENORMALS, list = [op], mode = c4d.MODIFY_ALL, bc = c4d.BaseContainer(), doc = doc)
verts = op.GetAllPoints()
for v in verts:
code += '\tv( %.6f, %.6f, %.6f );\n' % (v.x, -v.y, v.z)
code += '\n'
ncount = 0
uvcount = 0
faces = op.GetAllPolygons()
normals = op.CreatePhongNormals()
ndirection = 1
hasUV = False
for tag in op.GetTags():
if tag.GetName() == "UVW":
uvw = tag
hasUV = True
for f in faces:
if(f.d == f.c):
if(normals):
code += '\tf3( %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.a, f.b, f.c, normals[ncount].x*ndirection, normals[ncount].y*ndirection, normals[ncount].z*ndirection)
else:
code += '\tf3( %d, %d, %d );\n' % (f.a, f.b, f.c)
else:
if(normals):
code += '\tf4( %d, %d, %d, %d, %.6f, %.6f, %.6f );\n' % (f.a, f.b, f.c, f.d, normals[ncount].x*ndirection, normals[ncount].y*ndirection, normals[ncount].z*ndirection)
else:
code += '\tf4( %d, %d, %d, %d );\n' % (f.a, f.b, f.c, f.d)
if hasUV:
uv = uvw.GetSlow(uvcount);
# uvs += '[Vector('+str(uv[0].x)+','+str(1.0-uv[0].y)+'),Vector('+str(uv[1].x)+','+str(1.0-uv[1].y)+'),Vector('+str(uv[2].x)+','+str(1.0-uv[2].y)+')],'
if len(uv) == 4:
# {'a': Vector(1, 1, 0), 'c': Vector(0, 0, 0), 'b': Vector(1, 0, 0), 'd': Vect | or(0, 1, 0)}
code += '\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n' % (uv['a'].x, uv['a'].y, uv['b'].x, uv['b'].y, uv['b | '].x, uv['b'].y, uv['c'].x, uv['c'].y)
else:
code += '\tuv( %.6f, %.6f, %.6f, %.6f, %.6f, %.6f);\n' % (uv['a'].x, uv['a'].y, uv['b'].x, uv['b'].y, uv['c'].x, uv['c'].y)
ncount += 1
uvcount += 1
code +='\n\tthis.computeCentroids();\n\tthis.computeNormals(true);\n'
#selection color
code +='\n\tscope.colors = {};\n'
code +='\tscope.selections = {};\n'
selName = ''
for tag in op.GetTags():
if(tag.GetType() == 5616): #texture tag
material = tag.GetMaterial()
color = material[c4d.MATERIAL_COLOR_COLOR]
tag.SetBit(c4d.BIT_ACTIVE)
selName = clean(tag[c4d.TEXTURETAG_RESTRICTION])
if len(selName) == 0: print "*** WARNING! *** Missing selection name for material: " + material.GetName()
code += '\tscope.colors["'+selName+'"] = '+str(RGBToHTMLColor((color.x*255,color.y*255,color.z*255)))+';\n'
if tag.GetType() == 5673: #selection tag
print 'selection: ' + tag.GetName()
print 'selection object: ' + tag
sel = tag.GetSelection()
selName = clean(tag.GetName())
ids = sel.GetAll(op.GetPointCount())
indices = [i for i, e in enumerate(ids) if e != 0]
code += '\tscope.selections["'+selName+'"] = '+str(indices)+';\n'
code += '\n\tscope.autoColor = function(){\n'
code += '\t\tfor(var s in this.selections){\n'
code += '\t\t\tfor(var i = 0 ; i < this.selections[s].length; i++) this.faces[this.selections[s][i]].material = [new THREE.MeshBasicMaterial({color:this.colors[s]})];\n'
code += '\t\t}\n\t}\n'
# model position, rotation, scale rotation x,y,z = H,P,B => three.js x,y,z is P,H,B => y,x,z
p = op.GetPos()
r = op.GetRot()
s = op.GetScale()
code += '\n\tscope.getPosition = function(){\treturn new THREE.Vector3'+str((p.x,p.y,p.z))+';\t}\n'
code += '\n\tscope.getRotation = function(){\treturn new THREE.Vector3'+str((r.y,r.x,r.z))+';\t}\n'
code += '\n\tscope.getScale = function(){\treturn new THREE.Vector3'+str((s.x,s.y,s.z))+';\t}\n'
code += '\n'
code += '\tfunction v( x, y, z ) {\n\n'
code += '\t\tscope.vertices.push( new THREE.Vertex( new THREE.Vector3( x, y, z ) ) );\n\n'
code += '\t}\n\n'
code += '\tfunction f3( a, b, c, nx, ny, nz ) {\n\n'
code += '\t\tscope.faces.push( new THREE.Face3( a, b, c, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
code += '\t}\n\n'
code += '\tfunction f4( a, b, c, d, nx, ny, nz ) {\n\n'
code += '\t\tscope.faces.push( new THREE.Face4( a, b, c, d, nx && ny && nz ? new THREE.Vector3( nx, ny, nz ) : null ) );\n\n'
code += '\t}\n\n'
code += '\tfunction uv( u1, v1, u2, v2, u3, v3, u4, v4 ) {\n\n'
code += '\t\tvar uv = [];\n'
code += '\t\tuv.push( new THREE.UV( u1, v1 ) );\n'
code += '\t\tuv.push( new THREE.UV( u2, v2 ) );\n'
code += '\t\tuv.push( new THREE.UV( u3, v3 ) );\n'
code += '\t\tif ( u4 && v4 ) uv.push( new THREE.UV( u4, v4 ) );\n'
code += '\t\tscope.uvs.push( uv );\n'
code += '\t}\n\n'
code += '}\n\n'
code += '%s.prototype = new THREE.Geometry();\n' % classname
code += '%s.prototype.constructor = %s;' % (classname, classname)
SendModelingCommand(command = MCOMMAND_REVERSENORMALS, list = [op], mode = MODIFY_ALL, bc = c4d.BaseContainer(), doc = doc)
return code
|
OpenNFT/OpenNFT | opennft/plugins/onp_roiswglm.py | Python | gpl-3.0 | 3,813 | 0.002098 | # -*- coding: utf-8 -*-
"""
# Plugins
Plugins allow flexible modification and execution of OpenNFT without touching the core codebase. Plugins can access data, process them in a specific way,
and they can be switched on and off according to the user's need.
Each plugin has to be a subclass of *Process class specified in pyniexp.mlplugins. It has to contain a header in a format of dictionary (called META) with prespecified keys:
- plugin_name: It is a freeform text which will be displayed in the plugin dialog and in the logs.
- plugin_time: It is a event timestamp as specified in opennft.eventrecorder. Times, and it determines the execution time of the plugin (so far only t3 is implemented)
- plugin_init: It is the initialization code of the plugin. "{}" can be used to refer to OpenNFT parameters as specified in the P parameter dictionary. It can be a list of
commands, in which case, the first is run to create the object, and the rest are executed afterwards.
- plugin_signal: It is an expression returning to logical value, and it speicies the condition when the plugin can be executed.
- plugin_exec: It is th | e execution code of the plugin, and it is usually calls the plugin's load_data method to transfer some data to the plugin.
*Process classes pyniexp.mlplugins has an abstract/placeholder method called process, which should be overwritten to specify the operation on the data.
- the input to the process method of dataProcess (called data) is a one-dimensional numpy array
- the input to the process method of imageP | rocess (called image) is a multi-dimensional (usually 3D) numpy array as specified during initialization
# ROI step-wise GLM
This plugin demonstrates how to add you own approach (this one is a step-wise addition of each block) for ROI analysis.
__________________________________________________________________________
Copyright (C) 2016-2021 OpenNFT.org
Written by Tibor Auer
""" # noqa: E501
from pyniexp.mlplugins import dataProcess
from loguru import logger
from multiprocessing import Value, RawArray
from numpy import array, meshgrid, savetxt
import matplotlib.pyplot as plt
from os import path
META = {
"plugin_name": "ROI step-wise GLM",
"plugin_time": "t4", # according to opennft.eventrecorder.Times
"plugin_init": [
"ROIswGLM(int({NrROIs}),len({ProtNF}),r'{nfbDataFolder}')",
"self.parent.eng.evalin('base','onp_roiswglm')"
],
"plugin_signal": "self.parent.eng.evalin('base','isfield(mainLoopData,\\\'tmp_rawTimeSeriesAR1\\\')')",
"plugin_exec": "load_data(self.parent.eng.evalin('base','onp_roiswglm'))",
}
class ROIswGLM(dataProcess):
def __init__(self, nROIs, nBlocks, nfbDataFolder):
super().__init__(nROIs*nBlocks, autostart=False)
self.nfbDataFolder = nfbDataFolder
self.nROIs = nROIs
self.nBlocks = nBlocks
self.rtdata = RawArray('d', [0]*self.nROIs*self.nBlocks*self.nBlocks)
self.nData = Value('i', 0)
self.start_process()
def process(self, data):
if any(array(data) != 0):
for r in data:
self.rtdata[self.nData.value] = r
self.nData.value += 1
logger.info(('ROIs: [ ' + '{:.3f} '*len(data) + ']').format(*data))
def finalize_process(self):
dat = array(self.rtdata).reshape(self.nBlocks, self.nROIs, self.nBlocks)
for b in range(0, self.nBlocks):
fname = path.join(path.normpath(self.nfbDataFolder), 'ROIswGLM_{:02d}.txt'.format(b+1))
savetxt(fname=fname, X=dat[b,:,0:b+1].transpose(), fmt='%.3f', delimiter=',')
X, Y = meshgrid(self.nBlocks, self.nBlocks)
for r in range(0, self.nROIs):
ax = plt.subplot(120+(r+1), projection='3d')
ax.plot_surface(X, Y, dat[:,r,:])
plt.show()
|
MaximKsh/web_1sem | repository/askkashirin/askservice/models.py | Python | unlicense | 3,753 | 0.000576 | from django.db import models
from django.contrib.auth.models import User
from django.db.models import Sum, Q
# Create your models here.
class Profile(models.Model):
avatar = models.ImageField(
verbose_name=u'Аватар',
blank=True
)
description = models.TextField(
verbose_name=u'Описание',
blank=True
)
user = models.OneToOneField(
User
)
class Meta:
verbose_name = u'Профиль'
verbose_name_plural = u'Профили'
def __str__(self):
return self.description
class Post(models.Model):
author = models.ForeignKey(
User
)
creation_date = models.DateTimeField(
auto_now_add=True,
verbose_name=u'Дата создания'
)
class Meta:
verbose_name = u'Сообщение'
verbose_name_plural = u'Сообщения'
def __str__(self):
return self.author.username + str(self.creation_date)
class Tag(models.Model):
tag = models.CharField(
max_length=255,
verbose_name=u'Тег',
unique=True
)
class Meta:
verbose_name = u'Тег'
verbose_name_plural = u'Теги'
def __str__(self):
return str(self.tag)
class LikeManager(models.Manager):
def UserLikedPost(self, user, post):
return self.filter(Q(author=user) & Q(post=post))
class Like(models.Model):
author = models.ForeignKey(
User,
verbose_name=u'Автор'
)
creation_date = models.DateTimeField(
auto_now_add=True,
verbose_name=u'Дата создания'
)
post = models.ForeignKey(
Post,
verbose_name=u'К сообщению'
)
rating = models.IntegerField(
verbose_name=u'Рейтинг'
)
objects = LikeManager()
class Meta:
verbose_nam | e = u'Лайк'
verbose_name_plural = u'Лайки'
def __str__(self):
return str(self.rating)
class QuestionManager(models.Manager):
def newest(self):
return self.order_by('-creation_date')
def hot(self):
return self.annotate(Sum('like__rating')).order_by('-like__rating__sum') |
def byTag(self, tag):
return self.filter(tags__tag=tag)
def find(self, query):
return self.filter(Q(title__contains=query) | Q(content__contains=query))
class Question(Post):
title = models.CharField(
max_length=255,
verbose_name=u'Название'
)
content = models.TextField(
verbose_name=u'Содержание',
blank=True
)
tags = models.ManyToManyField(
Tag,
verbose_name=u'Теги',
blank=True
)
# переопределение стандартного менеджера с дополнением
objects = QuestionManager()
class Meta:
verbose_name = u'Вопрос'
verbose_name_plural = u'Вопросы'
def __str__(self):
return self.title
class AnswerManager(models.Manager):
def for_question(self, question):
return self.filter(to_question=question)
class Answer(Post):
content = models.TextField(
verbose_name=u'Содержание'
)
to_question = models.ForeignKey(
Question,
verbose_name=u'На вопрос'
)
approved = models.BooleanField(
verbose_name=u'Правильный',
default=False
)
# переопределение стандартного менеджера с дополнением
objects = AnswerManager()
class Meta:
verbose_name = u'Ответ'
verbose_name_plural = u'Ответы'
def __str__(self):
return self.content
|
jahan-addison/gridpaste | pastes/migrations/0001_initial.py | Python | mit | 638 | 0.001567 | # Generated by Django 2.1.7 on 2019-04-26 07:20
from django.db import migrations, mod | els
from django.contrib.postgres.fields import JSONField
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Pastes',
fields=[
('id', models.AutoField(primary_key=True, serialize=True)),
('title', models.TextField()),
('user', models.TextField(default='anonymous')),
('paste', JSONField()),
('token', models.Text | Field()),
],
),
]
|
bobbypaton/GoodVibes | goodvibes/__main__.py | Python | mit | 485 | 0 | # let guys could run Goodvibes by python -m goodvibe | s <args>
# Copied from __main__.py in pip
from __future__ import absolute_import
import os
import sys
# If we are run | ning from a wheel, add the wheel to sys.path
# This allows the usage python pip-*.whl/pip install pip-*.whl
if __package__ == '':
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
from goodvibes import GoodVibes # noqa
if __name__ == '__main__':
sys.exit(GoodVibes.main())
|
nilver/MiniProyecto | utils.py | Python | apache-2.0 | 693 | 0.010101 | import numpy as np
def softmax(x):
xt = np.exp(x - np.max(x))
return | xt / np.sum(xt)
def save_model_parameters_theano(outfile, model):
U, V, W = model.U.get_value(), model.V.get_value(), model.W.get_ | value()
np.savez(outfile, U=U, V=V, W=W)
print "Saved model parameters to %s." % outfile
def load_model_parameters_theano(path, model):
npzfile = np.load(path)
U, V, W = npzfile["U"], npzfile["V"], npzfile["W"]
model.hidden_dim = U.shape[0]
model.word_dim = U.shape[1]
model.U.set_value(U)
model.V.set_value(V)
model.W.set_value(W)
print "Loaded model parameters from %s. hidden_dim=%d word_dim=%d" % (path, U.shape[0], U.shape[1])
|
GHOSTnew/HelpBot | HelpBot.py | Python | gpl-3.0 | 5,517 | 0.010332 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################# Copyright ################################
# Author: GHOSTnew # 2014 #
########################################################################
# This file is part of HelpBot. #
# HelpBot is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# HelpBot is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Foobar. If not, see <http://www.gnu.org/licenses/>. #
########################################################################
import socket
import socks
import time
import web
import re
import modules
import ssl
################################ Config #################################################
nick = "HelpBot"
real_name = "Help on our Network"
nickserv = "password nickserv" # for nickserv auth (set None for no ns auth)
channels = ["#opAmanda", "#help"] #channel
host = "server host"
port = 6697
Tor = False
password = None #server password (None by default)
SSL = True #SSL , false by default
import_modules = ["noob", "opamanda", "ubuntu_fr", "wikipedia", "sms_killers", "clients"]
################################ End config ##############################################
class Bot(object):
def __init__ (self, host, port, nick, real_name,Tor, nickserv=None, password=None, SSL=False):
self.host = host
self.port = port
self.nick = nick
self.real_name = real_name
self.password = password
self.nickserv = nickserv
self.ssl = SSL
if Tor == True:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9050, True)
socket.socket = socks.socksocket
self.sock = socket.socket()
def connect(self):
self.sock.connect((self.host, self.port))
if self.ssl == True:
try:
self.sock = ssl.wrap_socket(self.sock)
self.sock.do_handshake()
except:
print "Failed to do ssl handshake"
time.sleep(5)
self.raw_line('USER ' + self.nick + ' 0 ' + self.nick +' :' + self.real_name)
if self.password:
self.raw_line('PASS ' + self.password)
self.raw_line('NICK ' + self.nick)
self.raw_line('mode +B')
time.sleep(2)
if self.nickserv:
self.send("NickServ", "IDENTIFY " + nickserv)
time.sleep(2)
for chan in channels:
self.join(chan)
def disconnect(self):
self.sock.close()
self.sock = socket.socket()
def read(self):
return self.sock.recv(1024)
def send(self, channel, msg):
self.sock.send('PRIVMSG ' + channel + ' :' + msg + '\r\n')
def notice(self, channel, msg):
self.sock.send('NOTICE ' + channel + ' :' + msg + '\r\n')
def action(self, channel, msg):
self.sock.send('PRIVMSG ' + channel + ' :' + '\001ACTION' + msg + '\001\r\n')
def raw_line(self, line):
self.sock.send(line + '\r\n')
def join(self, channel):
if channel.startswith('#'):
self.sock.send('JOIN ' + channel + '\r\n')
else:
self.sock.send('JOIN #' + channel + '\r\n')
def kick(self, channel, nick, reason = ""):
self.sock.send('KICK ' + channel + ' ' + nick + ' :' + reason + '\r\n')
def main():
HelpBot = Bot(host, port, nick, real_name, Tor, nickserv, password, SSL)
HelpBot.connect()
while True:
data = HelpBot.read()
if not data:
print "connexion lost"
HelpBot.disconnect()
HelpBot.connect()
#break
#print data
if data.find('PING') != -1:
HelpBot.raw_line('PONG ' + data.split()[1] + '\r\n')
elif data.find('PRIVMSG') != -1:
cmd = (': | '.join(data.split (':')[2:])).split( )[0]
channe | l = ''.join (data.split(':')[:2]).split (' ')[-2]
nick_source = (data.split (':')[1]).split('!')[0]
arg = data.split(" ")
args = ''
for index,item in enumerate(arg) :
if index > 3 :
if args == '':
args = item
else :
args += ' ' + item
args = args.split('\r')[0]
if cmd == ".help":
for module in import_modules:
mod = __import__ ("modules." + module, fromlist=import_modules)
mod.onhelp(HelpBot, channel)
else:
for module in import_modules:
mod = __import__ ("modules." + module, fromlist=import_modules)
mod.load(HelpBot, cmd, nick_source, channel, args)
if __name__ == "__main__" :
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.