repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
quantmind/lux | tests/odm/utils.py | Python | bsd-3-clause | 2,440 | 0 |
class SqliteMixin:
config_params = {'DATASTORE': 'sqlite://'}
class OdmUtils:
config_file = 'tests.odm'
async def _create_task(self, token, subject='This is a task', person=None,
**data):
data['subject'] = subject
if person:
data['assigned'] = person['id']
request = await self.client.post(self.api_url('tasks'),
json=data,
token=token)
data = self.json(request.response, 201)
self.assertIsInstance(data, dict)
self.assertTrue('id' in data)
self.assertEqual(data['subject'], subject)
self.assertTrue('created' | in data)
self.assertEqual(len(request.cache.new_items), 1)
self.assertEqual(request.cache.new_items[0]['id'], data['id'])
self.assertFalse(requ | est.cache.new_items_before_commit)
return data
async def _get_task(self, token, id):
request = await self.client.get(
'/tasks/{}'.format(id),
token=token)
response = request.response
self.assertEqual(response.status_code, 200)
data = self.json(response)
self.assertIsInstance(data, dict)
self.assertTrue('id' in data)
return data
async def _delete_task(self, token, id):
request = await self.client.delete(
'/tasks/{}'.format(id),
token=token)
response = request.response
self.assertEqual(response.status_code, 204)
async def _create_person(self, token, username, name=None):
name = name or username
request = await self.client.post(
'/people',
json={'username': username, 'name': name},
token=token)
data = self.json(request.response, 201)
self.assertIsInstance(data, dict)
self.assertTrue('id' in data)
self.assertEqual(data['name'], name)
return data
async def _update_person(self, token, id, username=None, name=None):
request = await self.client.patch(
self.api_url('people/%s' % id),
json={'username': username, 'name': name},
token=token
)
data = self.json(request.response, 200)
self.assertIsInstance(data, dict)
self.assertTrue('id' in data)
if name:
self.assertEqual(data['name'], name)
return data
|
eustislab/horton | horton/correlatedwfn/test/test_lagrange.py | Python | gpl-3.0 | 5,097 | 0.003924 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#pylint: skip-file
import numpy as np
from nose.tools import assert_raises
from horton import *
def test_ap1rog_lagrange():
fn_xyz = context.get_fn('test/li2.xyz')
mol = IOData.from_file(fn_xyz)
obasis = get_gobasis(mol.coordinates, mol.numbers, 'cc-pvdz')
lf = DenseLinalgFactory(obasis.nbasis)
occ_model = AufbauOccModel(3)
exp_alpha = lf.create_expansion(obasis.nbasis)
olp = obasis.compute_overlap(lf)
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
guess_core_hamiltonian(olp, kin, na, exp_alpha)
er = obasis.compute_electron_repulsion(lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, exte | rnal)
scf_solver = PlainSCFSolver()
scf_solver(ham, lf, olp, occ_model, exp_alpha)
one = lf.create_two_index(obasis.nbasis)
one.iadd(kin)
one.iadd(na)
# Do AP1roG optimization:
geminal_solver = RAp1rog(lf, occ_model)
| energy, g = geminal_solver(one, er, external['nn'], exp_alpha, olp, False)
geminal_solver.lagrange.assign(np.random.rand(3,25))
x = geminal_solver.geminal._array.ravel(order='C')
dxs = np.random.rand(200, 3*25)*(0.001)
check_delta(x, dxs, geminal_solver)
def fun(x, ham):
iiaa = ham.get_auxmatrix('gppqq')
iaia = ham.get_auxmatrix('lpqpq')
fock = ham.get_auxmatrix('fock')
one = ham.get_auxmatrix('t')
coeff = ham.lagrange._array
lagrangian = ham.compute_total_energy(x.reshape(3,25))
lagrangian += np.dot(coeff.ravel(order='C'), ham.vector_function_geminal(x.ravel(order='C'), iiaa, iaia, one, fock))
return lagrangian
def fun_deriv(x, ham):
iiaa = ham.get_auxmatrix('gppqq')
iaia = ham.get_auxmatrix('lpqpq')
fock = ham.get_auxmatrix('fock')
one = ham.get_auxmatrix('t')
coeff = ham.lagrange._array.ravel(order='C')
gmat = ham.lf.create_two_index(3,25)
gmat.assign(x)
gradient = ham.vector_function_lagrange(coeff,gmat, iiaa, iaia, one, fock)
return gradient.ravel(order='C')
def check_delta(x, dxs, ham):
"""Check the difference between two function values using the analytical gradient
Arguments:
fun
The function whose derivatives must be to be tested
fun_deriv
The implementation of the analytical derivatives
x
The argument for the reference point.
dxs
A list with small relative changes to x
For every displacement in ``dxs``, the following computation is repeated:
1) D1 = 'fun(x+dx) - fun(x)' is computed.
2) D2 = '0.5 (fun_deriv(x+dx) + fun_deriv(x)) . dx' is computed.
A threshold is set to the median of the D1 set. For each case where |D1|
is larger than the threshold, |D1 - D2|, should be smaller than the
threshold.
"""
dn1s = []
dn2s = []
dnds = []
f0 = fun(x, ham)
grad0 = fun_deriv(x, ham)
for dx in dxs:
f1 = fun(x+dx, ham)
grad1 = fun_deriv(x+dx, ham)
grad = 0.5*(grad0+grad1)
d1 = f1 - f0
if hasattr(d1, '__iter__'):
norm = np.linalg.norm
else:
norm = abs
d2 = np.dot(grad.ravel(), dx.ravel())
dn1s.append(norm(d1))
dn2s.append(norm(d2))
dnds.append(norm(d1-d2))
dn1s = np.array(dn1s)
dn2s = np.array(dn2s)
dnds = np.array(dnds)
# Get the threshold (and mask)
threshold = np.median(dn1s)
mask = dn1s > threshold
# Make sure that all cases for which dn1 is above the treshold, dnd is below
# the threshold
if not (dnds[mask] < threshold).all():
raise AssertionError((
'The first order approximation on the difference is too wrong. The '
'threshold is %.1e.\n\nDifferences:\n%s\n\nFirst order '
'approximation to differences:\n%s\n\nAbsolute errors:\n%s')
% (threshold,
' '.join('%.1e' % v for v in dn1s[mask]),
' '.join('%.1e' % v for v in dn2s[mask]),
' '.join('%.1e' % v for v in dnds[mask])
))
|
jbedorf/tensorflow | tensorflow/contrib/nn/python/ops/alpha_dropout.py | Python | apache-2.0 | 3,426 | 0.002335 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import | print_function
import numbers
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def alpha_dropout(x, keep_ | prob, noise_shape=None, seed=None, name=None): # pylint: disable=invalid-name
"""Computes alpha dropout.
Alpha Dropout is a dropout that maintains the self-normalizing property. For
an input with zero mean and unit standard deviation, the output of
Alpha Dropout maintains the original mean and standard deviation of the input.
See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
Args:
x: A tensor.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed` for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `keep_prob` is not in `(0, 1]`.
"""
with ops.name_scope(name, "alpha_dropout", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1.:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(keep_prob,
dtype=x.dtype,
name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
# Do nothing if we know keep_prob == 1
if tensor_util.constant_value(keep_prob) == 1:
return x
alpha = -1.7580993408473766
noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
random_tensor = random_ops.random_uniform(noise_shape,
seed=seed,
dtype=x.dtype)
kept_idx = gen_math_ops.greater_equal(random_tensor, 1 - keep_prob)
kept_idx = math_ops.cast(kept_idx, x.dtype)
# Mask
x = x * kept_idx + alpha * (1 - kept_idx)
# Affine transformation parameters
a = (keep_prob + keep_prob * (1 - keep_prob) * alpha ** 2) ** -0.5
b = -a * alpha * (1 - keep_prob)
# Affine transformation
return a * x + b
|
endeepak/pungi | tests/test_string.py | Python | mit | 846 | 0 | import unittest
from pungi import string
from pungi import expect
class StringTest(unittest.TestCase):
def test_pp_with_no_args(self):
expect(string.pp()).toBe("")
def test_pp_of_single_arg(self):
expect(string.pp('1')).toBe("'1'")
expect(string.pp(1)).toBe("1")
def test_pp_of_multiple_args(self):
expect(string.pp(1, 2)).toBe("1, 2")
expect(string.pp(1, [2, 3])).toBe("1, [2, 3]")
def test_pp_of_kwargs(self):
expect(string.pp(a=1, b=2)).toBe("a=1, b=2")
def test_pp_of_args_and_kwargs(self):
expect(string.pp(1, a='a')).toBe("1, a='a'")
|
def test_humanize_camelcase_word(self):
| expect(string.humanize("SomeOne")).toBe("some one")
expect(string.humanize("SomeOneElse")).toBe("some one else")
if __name__ == '__main__':
unittest.main()
|
probablytom/tomwallis.net | core/admin.py | Python | artistic-2.0 | 146 | 0.006849 | __author__ = 'tom'
from django.contrib import admin
from core.model | s import Post, Project
admin.site.register(Post)
admin.site.re | gister(Project) |
antoinecarme/sklearn2sql_heroku | tests/regression/diabetes/ws_diabetes_SVR_poly_hive_code_gen.py | Python | bsd-3-clause | 122 | 0.016393 | from sklearn2sql_heroku.tests.regression import generic a | s reg_gen |
reg_gen.test_model("SVR_poly" , "diabetes" , "hive")
|
JordiCarreraVentura/spellchecker | lib/__init__.py | Python | gpl-3.0 | 127 | 0 |
fro | m TextStreamer import TextStreamer
from parser import (
LinguistListParser
)
from FeatureEngine import Feature | Engine
|
swtp1v07/Savu | savu/test/plugin_test_sart.py | Python | apache-2.0 | 1,105 | 0.000905 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIE | S OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the spec | ific language governing permissions and
# limitations under the License.
"""
.. module:: plugins_test
:platform: Unix
:synopsis: unittest test classes for plugins
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import unittest
from savu.test.plugin_test import PluginTest
class SimpleReconTest(PluginTest):
def setUp(self):
self.plugin_name = "savu.plugins.simple_recon"
class ScikitimageSartTest(PluginTest):
def setUp(self):
self.plugin_name = "savu.plugins.scikitimage_sart"
if __name__ == "__main__":
unittest.main()
|
bjschafer/showdown-sync | cookie_reader.py | Python | apache-2.0 | 2,178 | 0.005051 | ########################################################################################################################
#
# cookie_reader.py
#
| # Purpose: read cookies from the web browser (currently only Chrome supported) and make them into Python objects
#
# Author: Braxton J. Schafer (bjschafer) [bjs]
#
# Creation date: 10/10/2014
#
# Copyright (c) 2014 Braxton J. Schafer
#
# Changelog:
#
############################# | ###########################################################################################
import sqlite3 as sqlite
import sys
import os.path
import json
from pokemon import pokemon
class cookie_reader():
def __init__(self, cookie_location, browser_type):
self.cookie_location = cookie_location
self._expand_tilde()
self.filename = "http_play.pokemonshowdown.com_0.localstorage"
def _get_cookie_location(self):
platform = sys.platform
if (platform == 'darwin'):
return "~/Library/Application Support/Google/Chrome/Default/Local Storage/"
elif (platform == 'linux2'):
return "~/.config/google-chrome/Default/Cookies"
elif (platform == 'win32' or platform == 'win64'):
return "~/AppData/Local/Google/Chrome/User Data/ Default/Local Storage/"
return "Platform not recognized."
def _expand_tilde(self):
self.cookie_location = self.cookie_location.replace('~', os.path.expanduser('~'))
def _read_from_database(self):
conn = sqlite.connect(self.cookie_location + self.filename)
conn.text_factory = str
c = conn.cursor()
c.execute("""SELECT value FROM ItemTable WHERE key='showdown_teams'""")
return c.fetchone()
def _get_json(self):
raw_json = str(self._read_from_database())
raw_json = raw_json[3:-3]
raw_json = raw_json.replace('\\x00', '')
return json.loads(raw_json)
def read_teams(self):
decoded = self._get_json()
for team in decoded:
yield (team['name'], team['format'], [pokemon(t) for t in team['team']])
if __name__ == '__main__':
c = cookie_reader()
for t in c.read_teams():
print(t) |
abdoosh00/edraak | lms/djangoapps/courseware/views.py | Python | agpl-3.0 | 36,829 | 0.002525 | """
Courseware views functions
"""
import logging
import urllib
import json
from collections import defaultdict
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_GET
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access
from courseware.courses import get_courses, get_course, get_studio_url, get_course_with_access, sort_by_announcement
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models imp | ort StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from open_ended_grading import open_ended_notifications
from student.models import UserTestGroup, CourseEnrollment
from student.views import single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotF | oundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.tabs import CourseTabList, StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from opaque_keys import InvalidKeyError
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
CONTENT_DEPTH = 2
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
@ensure_csrf_cookie
@cache_if_anonymous
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses = get_courses(request.user, request.META.get('HTTP_HOST'))[:9]
courses = sort_by_announcement(courses)
return render_to_response("courseware/courses.html", {'courses': courses, 'homepage_promo_video_youtube_id': 'yY2gsC8bL3U'})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = toc_for_course(user, request, course, chapter, section, field_data_cache)
context = dict([
('toc', toc),
('course_id', course.id.to_deprecated_string()),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)
] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first
child with children extending down to content_depth.
For example, if chapter_one has no position set, with two child sections,
section-A having no children and section-B having a discussion unit,
`get_current_child(chapter, min_depth=1)` will return section-B.
Returns None only if there are no children at all.
"""
def _get_default_child_module(child_modules):
"""Returns the first child of xmodule, subject to min_depth."""
if not child_modules:
default_child = None
elif not min_depth > 0:
default_child = child_modules[0]
else:
content_children = [child for child in child_modules if
child.has_children_at_depth(min_depth - 1)]
default_child = content_children[0] if content_children else None
return default_child
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
return _get_default_child_module(xmodule.get_display_items())
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# module has a set position, but the position is out of range.
# return default child.
child = _get_default_child_module(children)
else:
child = None
return child
def redirect_to_course_position(course_module, content_depth):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
If this isn't the users's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id.to_deprecated_string()}
chapter = get_current_child(course_module, min_depth=content_depth)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter, min_depth=content_depth - 1)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.location.name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
|
HardLight/denyhosts | tests/test_counter.py | Python | gpl-2.0 | 4,362 | 0.00321 | from __future__ import print_function, unicode_literals
from datetime import datetime, timedelta
import time
import unittest
from DenyHosts.counter import Counter, CounterRecord
class CounterRecordTest(unittest.TestCase):
def test_init(self):
c = CounterRecord()
self.assertEqual(c.get_count(), 0)
# Counter.__date is initialized with time.asctime(), so there isn't
# much to test beyond the type
self.assertTrue(isinstance(c.get_date(), str))
def test_init_provided_date(self):
"""
CounterRecord.__date is intended to be a string (for some reason; a datetime
object would be more appropriate), but any object can be used. Verify that
what we pass to the constructor is accessible.
"""
date = object()
c = CounterRecord(date=date)
self.assertTrue(c.get_date() is date)
def test_init_provided_count(self):
"""
CounterRecord.__count is intended to be nume | ric, but any object can be used.
Verify that what we pass to the constructor is accessible.
"""
count = object()
c = CounterRecord(count= | count)
self.assertTrue(c.get_count() is count)
def test_str(self):
"""
CounterRecord.__str__ is actually used in PurgeCounter.write_data, so it's
worth testing
"""
count = 1
date = object()
c = CounterRecord(count=count, date=date)
string = '%d:%s' % (count, date)
self.assertEqual(str(c), string)
def test_add(self):
"""
CounterRecord.__add__ is *horrible* design, but that's how it's been for a
very long time. I want test coverage for the current behavior before making
any changes.
"""
c = CounterRecord()
orig_date = c.get_date()
self.assertEqual(c.get_count(), 0)
increment = 4
# !
c + increment
self.assertEqual(c.get_count(), increment)
# Original attempt: self.assertNotEqual(c.get_date(), orig_date)
# time.asctime only provides seconds in that string representation of the
# date, though, so just verify that the two strings are different objects
# since they'll usually be equal
self.assertFalse(c.get_date() is orig_date)
def test_reset_count(self):
c = CounterRecord()
c + 1
orig_date = c.get_date()
c.reset_count()
self.assertEqual(c.get_count(), 0)
self.assertTrue(c.get_date() is orig_date)
def test_age_count_newer(self):
"""
Initialize a CounterRecord to one hour ago, then call age_count with 2 hours
to verify that the count won't reset. ("Reset if the stored date is older than
2 hours ago")
"""
one_hour_ago = datetime.now() - timedelta(hours=1)
date_str = time.asctime(one_hour_ago.timetuple())
count = object()
c = CounterRecord(count=count, date=date_str)
c.age_count(2 * 60 * 60)
self.assertEqual(c.get_count(), count)
def test_age_count_older(self):
"""
Initialize a CounterRecord to one hour ago, then reset the count by passing 0
to age_count (i.e. "reset if the stored date is older than now")
"""
one_hour_ago = datetime.now() - timedelta(hours=1)
date_str = time.asctime(one_hour_ago.timetuple())
count = object()
c = CounterRecord(count=count, date=date_str)
c.age_count(0)
self.assertEqual(c.get_count(), 0)
def test_counter_repr(self):
one_hour_ago = datetime.now() - timedelta(hours=1)
date_str = time.asctime(one_hour_ago.timetuple())
count = object()
c = CounterRecord(count=count, date=date_str)
c.age_count(0)
self.assertEqual(c.__repr__(), 'CountRecord <{} - {}>'.format(0, date_str))
class CounterTest(unittest.TestCase):
def test_init(self):
c = Counter()
self.assertEqual(len(c), 0)
def test_missing_key(self):
c = Counter()
key = 'key'
value = c[key]
self.assertEqual(value.get_count(), 0)
self.assertTrue(key in c)
def test_existing_key(self):
key = 'key'
value = object()
c = Counter()
c[key] = value
self.assertTrue(c[key] is value)
|
cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/pymongo/cursor_manager.py | Python | mit | 2,846 | 0 | # Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DEPRECATED - Different managers to handle when cursors are killed after
they are closed.
New cursor managers should be defined as subclasses of CursorManager and can be
installed on a connection by calling
`pymongo.connection.Connection.set_cursor_manager`.
.. versionchanged:: 2.1+
Deprecated.
"""
import weakref
class CursorManager(object):
"""The default cursor manager.
This manager will kill cursors one at a time as they are closed.
"""
def __init__(self, connection):
"""Instantiate the manager.
:Parameters:
- `connection`: a Mongo Connection
"""
self.__connection = weakref.ref(connection)
def close(self, cursor_id):
"""Close a cursor by killing it immediately.
Raises TypeError if cursor_id is not an instance of (int, long).
:Parameters:
- `cursor_id`: cursor id to close
"""
if not isinstance(cursor_id, (int, long)):
raise TypeError("cursor_id must be an instance of (int, long)")
self.__connection().kill_cursors([cursor_id])
class BatchCursorManager(CursorManager):
"""A cursor manager that kills cursors in batches.
"""
def __init__(self, connection):
"""Instantiate the manager.
:Parameters:
- `connection`: a Mongo Connection
"""
self.__dying_cursor | s = []
self.__max_dying_cursors = 20
self.__connection = weakref.ref(connection)
CursorManager.__init__(self, connection)
def __del__(self):
"""Cleanup - be sure to kill any outstanding cursors.
"""
self.__connection().kill_cursors(self.__dying_cursors)
def close(self, cursor_id):
"""Close a cursor by killing it in a batch.
Raises TypeError if cursor_id is not an instance of (int, long).
:Parameter | s:
- `cursor_id`: cursor id to close
"""
if not isinstance(cursor_id, (int, long)):
raise TypeError("cursor_id must be an instance of (int, long)")
self.__dying_cursors.append(cursor_id)
if len(self.__dying_cursors) > self.__max_dying_cursors:
self.__connection().kill_cursors(self.__dying_cursors)
self.__dying_cursors = []
|
jim-minter/ose3-demos | bin/etcd-client.py | Python | apache-2.0 | 2,017 | 0.000496 | #!/usr/bin/python
import argparse
import fcntl
import os
import requests
import socket
| import struct
import textwrap
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument("--host", nargs="?", default=socket.gethostname())
ap.add_argument("--port", nargs="?", default="4001")
ap.add_argument("cmd", choices=["ls", "watch"])
ap.add_argument("key", nargs="?", default="/ | ")
return ap.parse_args()
def get_winsize(fd=2):
TIOCGWINSZ = 21523
try:
return struct.unpack("hh", fcntl.ioctl(fd, TIOCGWINSZ, "xxxx"))
except:
return (os.environ.get("LINES", 25), os.environ.get("COLUMNS", 80))
def ls(url, key, level=""):
j = s.get(url + key, cert=cert, verify=ca).json()
for node in sorted(j["node"].get("nodes", []), key=lambda n: n["key"]):
print level + node["key"]
if "dir" in node:
ls(url, node["key"], level + " ")
else:
print textwrap.fill(node["value"], winsize[1],
initial_indent=level + " ",
subsequent_indent=level + " ",
break_on_hyphens=False)
def main(args):
global s
s = requests.Session()
global winsize
winsize = get_winsize()
if args.cmd == "ls":
ls("https://%s:%s/v2/keys" % (args.host, args.port), args.key)
else:
watch("https://%s:%s/v2/keys%s?wait=true&recursive=true" %
(args.host, args.port, args.key))
def watch(url):
wi = ""
while True:
j = s.get(url + wi, cert=cert, verify=ca).json()
print j["action"] + " " + j["node"]["key"]
if "value" in j["node"]:
print j["node"]["value"]
print
wi = "&waitIndex=%u" % (j["node"]["modifiedIndex"] + 1)
if __name__ == "__main__":
root = "/etc/origin/master"
cert = (root + "/master.etcd-client.crt", root + "/master.etcd-client.key")
ca = root + "/ca.crt"
args = parse_args()
main(parse_args())
|
2ndy/RaspIM | usr/lib/python2.6/lib2to3/pytree.py | Python | gpl-2.0 | 28,107 | 0.000391 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
import warnings
from StringIO import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
class Base(object):
    """
    Abstract base class for Node and Leaf.
    This provides some default functionality and boilerplate using the
    template pattern.
    A node may be a subnode of at most one parent.
    """
    # Default values for instance variables
    type = None # int: token number (< 256) or symbol number (>= 256)
    parent = None # Parent node pointer, or None
    children = () # Tuple of subnodes
    was_changed = False  # set by changed(); lets callers detect tree edits
    def __new__(cls, *args, **kwds):
        """Constructor that prevents Base from being instantiated."""
        assert cls is not Base, "Cannot instantiate Base"
        return object.__new__(cls)
    def __eq__(self, other):
        """
        Compare two nodes for equality.
        This calls the method _eq().
        """
        # Exact class match required; subclasses of the same type don't compare.
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)
    # Defining __eq__ makes instances unhashable in Py3 unless __hash__ is set;
    # setting it to None makes that explicit on Py2 as well.
    __hash__ = None # For Py3 compatibility.
    def __ne__(self, other):
        """
        Compare two nodes for inequality.
        This calls the method _eq().
        """
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self._eq(other)
    def _eq(self, other):
        """
        Compare two nodes for equality.
        This is called by __eq__ and __ne__. It is only called if the two nodes
        have the same type. This must be implemented by the concrete subclass.
        Nodes should be considered equal if they have the same structure,
        ignoring the prefix string and other context information.
        """
        raise NotImplementedError
    def clone(self):
        """
        Return a cloned (deep) copy of self.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def post_order(self):
        """
        Return a post-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def pre_order(self):
        """
        Return a pre-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def set_prefix(self, prefix):
        """
        Set the prefix for the node (see Leaf class).
        DEPRECATED; use the prefix property directly.
        """
        warnings.warn("set_prefix() is deprecated; use the prefix property",
                      DeprecationWarning, stacklevel=2)
        self.prefix = prefix
    def get_prefix(self):
        """
        Return the prefix for the node (see Leaf class).
        DEPRECATED; use the prefix property directly.
        """
        warnings.warn("get_prefix() is deprecated; use the prefix property",
                      DeprecationWarning, stacklevel=2)
        return self.prefix
    def replace(self, new):
        """Replace this node with a new one in the parent."""
        assert self.parent is not None, str(self)
        assert new is not None
        # Accept either a single replacement node or a list of them.
        if not isinstance(new, list):
            new = [new]
        l_children = []
        found = False
        # Rebuild the parent's child list, splicing `new` in place of self.
        # Identity (is), not equality, locates self among its siblings.
        for ch in self.parent.children:
            if ch is self:
                assert not found, (self.parent.children, self, new)
                # NOTE(review): `new` can no longer be None here (asserted
                # above), so this guard is redundant.
                if new is not None:
                    l_children.extend(new)
                found = True
            else:
                l_children.append(ch)
        assert found, (self.children, self, new)
        self.parent.changed()
        self.parent.children = l_children
        # Re-parent the replacements and detach self from the tree.
        for x in new:
            x.parent = self.parent
        self.parent = None
    def get_lineno(self):
        """Return the line number which generated the invocant node."""
        # Descend to the leftmost leaf; only leaves carry line numbers.
        # Implicitly returns None for a childless non-leaf node.
        node = self
        while not isinstance(node, Leaf):
            if not node.children:
                return
            node = node.children[0]
        return node.lineno
    def changed(self):
        """Mark this node and every ancestor as modified."""
        if self.parent:
            self.parent.changed()
        self.was_changed = True
    def remove(self):
        """
        Remove the node from the tree. Returns the position of the node in its
        parent's children before it was removed.
        """
        # Implicitly returns None when the node is not attached to a parent.
        if self.parent:
            for i, node in enumerate(self.parent.children):
                if node is self:
                    self.parent.changed()
                    del self.parent.children[i]
                    self.parent = None
                    return i
    @property
    def next_sibling(self):
        """
        The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i+1]
                except IndexError:
                    return None
    @property
    def prev_sibling(self):
        """
        The node immediately preceding the invocant in their parent's children
        list. If the invocant does not have a previous sibling, it is None.
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i-1]
    def get_suffix(self):
        """
        Return the string immediately following the invocant node. This is
        effectively equivalent to node.next_sibling.prefix
        """
        next_sib = self.next_sibling
        if next_sib is None:
            return u""
        return next_sib.prefix
    # On Python 2, str() must return bytes; delegate to the subclass's
    # __unicode__ and encode as ASCII.
    if sys.version_info < (3, 0):
        def __str__(self):
            return unicode(self).encode("ascii")
class Node(Base):
"""Concrete implementation for interior nodes."""
def __init__(self, type, children, context=None, prefix=None):
"""
Initializer.
Takes a type constant (a symbol number >= 256), a sequence of
child nodes, and an optional context keyword argument.
As a side effect, the parent pointers of the children are updated.
| """
assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
assert ch.parent is None, repr(ch)
ch.parent = self
if prefix is not None:
self.prefix = prefix
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%s, %r)" % (self.__class__.__name__,
typ | e_repr(self.type),
self.children)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return u"".join(map(unicode, self.children))
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children |
crslab/Inverse-Reinforcement-Learning | irl/value_iteration.py | Python | mit | 4,821 | 0.001659 | """
Find the value function associated with a policy. Based on Sutton & Barto, 1998.
Matthew Alger, 2015
matthew.alger@anu.edu.au
"""
import numpy as np
def value(policy, n_states, transition_probabilities, reward, discount,
          threshold=1e-2):
    """Evaluate a fixed policy, returning its state-value function.

    Iterative policy evaluation (Sutton & Barto, 1998), updating states in
    place (Gauss-Seidel style) until no state changes by more than
    ``threshold``.

    policy: List of action ints for each state.
    n_states: Number of states. int.
    transition_probabilities: Array mapping (state, action, state) to
        transition probabilities.
    reward: Vector of rewards for each state.
    discount: MDP discount factor. float.
    threshold: Convergence threshold, default 1e-2. float.
    -> Array of values for each state
    """
    v = np.zeros(n_states)
    max_change = float("inf")
    while max_change > threshold:
        max_change = 0
        for s in range(n_states):
            previous = v[s]
            action = policy[s]
            # Expected one-step return under the policy's action.
            backup = 0.0
            for k in range(n_states):
                backup += (transition_probabilities[s, action, k] *
                           (reward[k] + discount * v[k]))
            v[s] = backup
            max_change = max(max_change, abs(previous - v[s]))
    return v
def optimal_value(n_states, n_actions, transition_probabilities, reward,
                  discount, threshold=1e-2):
    """Find the optimal value function via value iteration.

    n_states: Number of states. int.
    n_actions: Number of actions. int.
    transition_probabilities: NumPy array of shape
        (n_states, n_actions, n_states) mapping (state, action, state) to
        transition probabilities.
    reward: Vector of rewards for each state.
    discount: MDP discount factor. float.
    threshold: Convergence threshold, default 1e-2. float.
    -> Array of values for each state
    """
    reward = np.asarray(reward)
    v = np.zeros(n_states)
    diff = float("inf")
    while diff > threshold:
        diff = 0
        for s in range(n_states):
            # reward + discount*v is invariant across actions for a given
            # state (v only changes after the action maximisation), so
            # compute it once per state instead of once per (state, action)
            # pair as the original did.
            returns = reward + discount * v
            # transition_probabilities[s] has shape (n_actions, n_states);
            # one dot product yields the backup value of every action.
            max_v = np.dot(transition_probabilities[s], returns).max()
            diff = max(diff, abs(v[s] - max_v))
            v[s] = max_v
    return v
def find_policy(n_states, n_actions, transition_probabilities, reward, discount,
                threshold=1e-2, v=None, stochastic=True):
    """Find the optimal policy.

    n_states: Number of states. int.
    n_actions: Number of actions. int.
    transition_probabilities: NumPy array of shape
        (n_states, n_actions, n_states) mapping (state, action, state) to
        transition probabilities.
    reward: Vector of rewards for each state.
    discount: MDP discount factor. float.
    threshold: Convergence threshold, default 1e-2. float.
    v: Value function (if known). Default None.
    stochastic: Whether the policy should be stochastic. Default True.
    -> Action probabilities for each state or action int for each state
       (depending on stochasticity).
    """
    if v is None:
        v = optimal_value(n_states, n_actions, transition_probabilities, reward,
                          discount, threshold)
    # Expected one-step return of landing in each state; invariant for the
    # rest of this function, so compute it once (asarray also accepts plain
    # lists for reward/v, matching the original's deterministic branch).
    returns = np.asarray(reward) + discount * np.asarray(v)
    if stochastic:
        # Get Q using equation 9.2 from Ziebart's thesis. A single tensor
        # dot over the last axis replaces the original O(S*A) Python loop
        # and produces the same (n_states, n_actions) matrix.
        Q = transition_probabilities.dot(returns)
        Q -= Q.max(axis=1).reshape((n_states, 1))  # For numerical stability.
        Q = np.exp(Q)/np.exp(Q).sum(axis=1).reshape((n_states, 1))
        return Q

    def _policy(s):
        # Greedy action for state s; max() breaks ties toward the lowest
        # action index, as the original did.
        return max(range(n_actions),
                   key=lambda a: np.dot(transition_probabilities[s, a], returns))
    return np.array([_policy(s) for s in range(n_states)])
if __name__ == '__main__':
    # Quick unit test using gridworld.
    import mdp.gridworld as gridworld
    gw = gridworld.Gridworld(3, 0.3, 0.9)
    # Evaluate the known optimal deterministic policy on the 3x3 gridworld.
    v = value([gw.optimal_policy_deterministic(s) for s in range(gw.n_states)],
              gw.n_states,
              gw.transition_probability,
              [gw.reward(s) for s in range(gw.n_states)],
              gw.discount)
    # NOTE(review): the third positional argument of np.isclose is rtol, so
    # passing 1 makes this an extremely loose check — confirm whether an
    # absolute tolerance was intended instead.
    assert np.isclose(v,
                      [5.7194282, 6.46706692, 6.42589811,
                       6.46706692, 7.47058224, 7.96505174,
                       6.42589811, 7.96505174, 8.19268666], 1).all()
    # Value iteration should agree with the policy-evaluation result above.
    opt_v = optimal_value(gw.n_states,
                          gw.n_actions,
                          gw.transition_probability,
                          [gw.reward(s) for s in range(gw.n_states)],
                          gw.discount)
    assert np.isclose(v, opt_v).all()
|
googleapis/python-aiplatform | google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py | Python | apache-2.0 | 17,093 | 0.001404 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import index
from google.cloud.aiplatform_v1.types import index_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexServiceGrpcTransport
class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport):
"""gRPC AsyncIO backend transport for IndexService.
A service for creating and managing Vertex AI's Index
resources.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None | ,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
| The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
creden |
kennedyshead/home-assistant | homeassistant/components/lcn/config_flow.py | Python | apache-2.0 | 3,022 | 0.000662 | """Config flow to configure the LCN integration."""
import logging
import pypck
from homeassistant import config_entries
from homeassistant.const import (
CONF_HOST,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from .const import CONF_DIM_MODE, CONF_SK_NUM_TRIES, DOMAIN
_LOGGER = logging.getLogger(__name__)
def get_config_entry(hass, data):
    """Return the config entry already registered for this ip address/port.

    Returns None when no matching entry exists.
    """
    for entry in hass.config_entries.async_entries(DOMAIN):
        if (entry.data[CONF_IP_ADDRESS] == data[CONF_IP_ADDRESS]
                and entry.data[CONF_PORT] == data[CONF_PORT]):
            return entry
    return None
async def validate_connection(host_name, data):
    """Validate that a connection to LCN can be established.

    Opens a short-lived PCHK connection with the supplied parameters and
    closes it again; returns *data* unchanged on success.
    """
    settings = {
        "SK_NUM_TRIES": data[CONF_SK_NUM_TRIES],
        "DIM_MODE": pypck.lcn_defs.OutputPortDimMode[data[CONF_DIM_MODE]],
    }
    _LOGGER.debug("Validating connection parameters to PCHK host '%s'", host_name)
    connection = pypck.connection.PchkConnectionManager(
        data[CONF_IP_ADDRESS],
        data[CONF_PORT],
        data[CONF_USERNAME],
        data[CONF_PASSWORD],
        settings=settings,
    )
    await connection.async_connect(timeout=5)
    _LOGGER.debug("LCN connection validated")
    await connection.async_close()
    return data
class LcnFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a LCN config flow."""
    VERSION = 1
    async def async_step_import(self, data):
        """Import existing configuration from LCN.

        Aborts with a descriptive reason on connection failure, updates an
        already-configured entry in place, or creates a new config entry.
        """
        host_name = data[CONF_HOST]
        # validate the imported connection parameters
        try:
            await validate_connection(host_name, data)
        except pypck.connection.PchkAuthenticationError:
            _LOGGER.warning('Authentication on PCHK "%s" failed', host_name)
            return self.async_abort(reason="authentication_error")
        except pypck.connection.PchkLicenseError:
            # PCHK licenses cap the number of simultaneous connections.
            _LOGGER.warning(
                'Maximum number of connections on PCHK "%s" was '
                "reached. An additional license key is required",
                host_name,
            )
            return self.async_abort(reason="license_error")
        except TimeoutError:
            _LOGGER.warning('Connection to PCHK "%s" failed', host_name)
            return self.async_abort(reason="connection_timeout")
        # check if we already have a host with the same address configured
        entry = get_config_entry(self.hass, data)
        if entry:
            # NOTE(review): mutating entry.source directly bypasses the config
            # entries API — confirm this is intentional.
            entry.source = config_entries.SOURCE_IMPORT
            self.hass.config_entries.async_update_entry(entry, data=data)
            return self.async_abort(reason="existing_configuration_updated")
        return self.async_create_entry(title=f"{host_name}", data=data)
|
LukasMosser/MontePetro | montepetro/models.py | Python | gpl-2.0 | 2,237 | 0.001341 | import logging
from copy import deepcopy
from mont | epetro.seed_generators import SeedGenerator
class Model(object):
    """Top-level Monte Carlo model: named regions with per-region properties."""

    def __init__(self, name, seed):
        self.name = name
        self.seed = seed
        # All property seeds are derived from this single generator so runs
        # are reproducible from the model seed alone.
        self.seed_generator = SeedGenerator(self.seed)
        self.properties = {}
        self.regions = {}

    def add_region(self, region):
        """Register *region*, re-seeding its properties from this model.

        Raises KeyError when a region with the same name already exists.
        """
        if region.name in self.regions:
            # Lazy %s formatting; also fixes the missing space after
            # "region" in the original concatenated message.
            logging.error("Encountered duplicate region %s in Model %s.",
                          region.name, self.name)
            raise KeyError
        for key in region.properties:
            # update the regional property seed and drop any stale values
            region.properties[key].update_seed(self.seed_generator)
            region.properties[key].values = None
        self.regions[region.name] = region

    def add_property(self, prop):
        """Register a model-wide property.

        Raises KeyError when a property with the same name already exists.
        """
        if prop.name in self.properties:
            logging.error("Encountered duplicate property %s in Model %s.",
                          prop.name, self.name)
            raise KeyError
        prop.update_seed(self.seed_generator)
        self.properties[prop.name] = prop

    def add_defined_properties_to_regions(self):
        """Copy every model-level property into each region lacking it."""
        # .items() replaces Py2-only .iteritems(); behaviour is identical on
        # both Python 2 and 3. Locals renamed so the builtin `property` is
        # no longer shadowed.
        for region in self.regions.values():
            for prop_name, prop in self.properties.items():
                if prop_name not in region.properties:
                    region.add_property(deepcopy(prop))
                    region.properties[prop_name].update_seed(self.seed_generator)

    def add_regional_property(self, prop_name, prop):
        """Instantiate *prop* (a factory taking a region) for every region."""
        for region in self.regions.values():
            region.properties[prop_name] = prop(region)
            region.properties[prop_name].generate_values()

    def run(self, config):
        """Generate values for all regional properties.

        *config* maps region name -> property name -> kwargs for
        generate_values().
        """
        for region_name, region in self.regions.items():
            region_config = config[region_name]
            for prop_name, prop_obj in region.properties.items():
                prop_config = region_config[prop_name]
                prop_obj.generate_values(**prop_config)
|
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/KEGG/Enzyme/__init__.py | Python | apache-2.0 | 10,978 | 0.002824 | # Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# Copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to work with the KEGG Enzyme database.
Functions:
parse - Returns an iterator giving Record objects.
Classes:
Record -- Holds the information from a KEGG Enzyme record.
"""
from __future__ import print_function
from Bio.KEGG import _write_kegg
from Bio.KEGG import _wrap_kegg
# Set up line wrapping rules (see Bio.KEGG._wrap_kegg)
# Each rule is a list: [indent, terminator, (separator, terminator,
# keep-flag, indent-flag), ...] — consumed by _wrap_kegg.
rxn_wrap = [0, "",
            (" + ", "", 1, 1),
            (" = ", "", 1, 1),
            (" ", "$", 1, 1),
            ("-", "$", 1, 1)]
name_wrap = [0, "",
             (" ", "$", 1, 1),
             ("-", "$", 1, 1)]


# Plain functions instead of lambda assignments (PEP 8 E731): same values,
# but with proper names in tracebacks and room for docstrings.
def id_wrap(indent):
    """Return the wrap rule for identifier lists at the given indent."""
    return [indent, "", (" ", "", 1, 0)]


def struct_wrap(indent):
    """Return the wrap rule for structure lists at the given indent."""
    return [indent, "", (" ", "", 1, 1)]
class Record(object):
    """One KEGG Enzyme record.

    Members:
    entry       The EC number (without the 'EC ').
    name        A list of the enzyme names.
    classname   A list of the classification terms.
    sysname     The systematic name of the enzyme.
    reaction    A list of the reaction description strings.
    substrate   A list of the substrates.
    product     A list of the products.
    inhibitor   A list of the inhibitors.
    cofactor    A list of the cofactors.
    effector    A list of the effectors.
    comment     A list of the comment strings.
    pathway     A list of 3-tuples: (database, id, pathway)
    genes       A list of 2-tuples: (organism, list of gene ids)
    disease     A list of 3-tuples: (database, id, disease)
    structures  A list of 2-tuples: (database, list of struct ids)
    dblinks     A list of 2-tuples: (database, list of db ids)
    """
    def __init__(self):
        """Create a new, empty Record."""
        self.entry = ""
        # Every remaining field accumulates zero or more parsed entries.
        for attr in ("name", "classname", "sysname", "reaction", "substrate",
                     "product", "inhibitor", "cofactor", "effector", "comment",
                     "pathway", "genes", "disease", "structures", "dblinks"):
            setattr(self, attr, [])
    def __str__(self):
        """Render the record in KEGG flat-file format."""
        sections = (
            self._entry(),
            self._name(),
            self._classname(),
            self._sysname(),
            self._reaction(),
            self._substrate(),
            self._product(),
            self._inhibitor(),
            self._cofactor(),
            self._effector(),
            self._comment(),
            self._pathway(),
            self._genes(),
            self._disease(),
            self._structures(),
            self._dblinks(),
        )
        return "".join(sections) + "///"
    def _wrap_all(self, lines, rule):
        # Apply one wrap rule to every line of a section.
        return [_wrap_kegg(line, wrap_rule=rule) for line in lines]
    def _entry(self):
        return _write_kegg("ENTRY", ["EC " + self.entry])
    def _name(self):
        return _write_kegg("NAME", self._wrap_all(self.name, name_wrap))
    def _classname(self):
        return _write_kegg("CLASS", self.classname)
    def _sysname(self):
        return _write_kegg("SYSNAME", self._wrap_all(self.sysname, name_wrap))
    def _reaction(self):
        return _write_kegg("REACTION", self._wrap_all(self.reaction, rxn_wrap))
    def _substrate(self):
        return _write_kegg("SUBSTRATE", self._wrap_all(self.substrate, name_wrap))
    def _product(self):
        return _write_kegg("PRODUCT", self._wrap_all(self.product, name_wrap))
    def _inhibitor(self):
        return _write_kegg("INHIBITOR", self._wrap_all(self.inhibitor, name_wrap))
    def _cofactor(self):
        return _write_kegg("COFACTOR", self._wrap_all(self.cofactor, name_wrap))
    def _effector(self):
        return _write_kegg("EFFECTOR", self._wrap_all(self.effector, name_wrap))
    def _comment(self):
        return _write_kegg("COMMENT", self._wrap_all(self.comment, id_wrap(0)))
    def _pathway(self):
        lines = ["{0}: {1} {2}".format(entry[0], entry[1], entry[2])
                 for entry in self.pathway]
        return _write_kegg("PATHWAY", self._wrap_all(lines, id_wrap(16)))
    def _genes(self):
        lines = ["{0}: {1}".format(entry[0], " ".join(entry[1]))
                 for entry in self.genes]
        return _write_kegg("GENES", self._wrap_all(lines, id_wrap(5)))
    def _disease(self):
        lines = ["{0}: {1} {2}".format(entry[0], entry[1], entry[2])
                 for entry in self.disease]
        return _write_kegg("DISEASE", self._wrap_all(lines, id_wrap(13)))
    def _structures(self):
        # The trailing space after the id list is part of the format.
        lines = ["{0}: {1} ".format(entry[0], " ".join(entry[1]))
                 for entry in self.structures]
        return _write_kegg("STRUCTURES", self._wrap_all(lines, struct_wrap(5)))
    def _dblinks(self):
        # This is a bit of a cheat that won't work if enzyme entries
        # have more than one link id per db id. For now, that's not
        # the case - storing links ids in a list is only to make
        # this class similar to the Compound.Record class.
        lines = ["{0}: {1}".format(entry[0], " ".join(entry[1]))
                 for entry in self.dblinks]
        return _write_kegg("DBLINKS", lines)
def parse(handle):
"""Parse a KEGG Enzyme file, returning Record objects.
This is an iterator function, typically used in a for loop. For
example, using one of the example KEGG files in the Biopython
test suite,
>>> with open("KEGG/enzyme.sample") as handle:
... for record in parse(handle):
... print("%s %s" % (record.entry, record.name[0]))
...
1.1.1.1 Alcohol dehydrogenase
1.1.1.62 Estradiol 17beta-dehydrogenase
1.1.1.68 Transferred to EC 1.7.99.5
1.6.5.3 NADH dehydrogenase (ubiquinone)
1.14.13.28 3,9-Dihydroxypterocarpan 6a-monooxygenase
2.4.1.68 Glycoprotein 6-alpha-L-fucosyltransferase
3.1.1.6 Acetylesterase
2.7.2.1 Acetate kinase
"""
record = Record()
for line in handle:
if line[:3] == "///":
yield record
record = Record()
continue
if line[:12] != " ":
keyword = line[:12]
data = line[12:].strip()
if keyword == "ENTRY ":
words = data.split()
record.entry = words[1]
elif keyword == "CLASS ":
record.classname.append(data)
elif keyword == "COFACTOR ":
record.cofactor.append(data)
elif keyword == "COMMENT ":
record.comment.append(data)
elif keyword == "DBLINKS ":
if ":" in data:
key, values = data.split(":")
values = values.split()
row = (key, values)
record.dblinks.append(row)
else:
row = record.dblinks[-1]
key, v |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generated benchmark case (parameters mirror the filename
# test_artificial_32_Anscombe_Lag1Trend_0_12_20.py): 32 daily samples,
# Lag1Trend trend, no cycle, Anscombe transform, AR order 12, 20 exogenous
# variables. Reconstructed from a corrupted copy of this one-call script.
art.process_dataset(N=32, FREQ='D', seed=0, trendtype="Lag1Trend",
                    cycle_length=0, transform="Anscombe", sigma=0.0,
                    exog_count=20, ar_order=12)
joker-ace/internships-reviews | src/utils/__init__.py | Python | mit | 21 | 0 | __author__ = ' | joker' | |
jdowner/qtile | libqtile/widget/keyboardlayout.py | Python | mit | 4,269 | 0.000937 | # Copyright (c) 2013 Jacob Mourelos
# Copyright (c) 2014 Shepilov Vladislav
# Copyright (c) 2014-2015 Sean Vig
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# | copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT | LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import subprocess
from subprocess import CalledProcessError
from . import base
from libqtile.log_utils import logger
kb_regex = re.compile('layout:\s+(?P<layout>\w+)')
class KeyboardLayout(base.InLoopPollText):
    """Widget for changing and displaying the current keyboard layout

    It requires setxkbmap to be available in the system.
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("update_interval", 1, "Update time in seconds."),
        ("configured_keyboards", ["us"], "A list of predefined keyboard layouts "
            "represented as strings. For example: "
            "['us', 'us colemak', 'es', 'fr']."),
    ]

    def __init__(self, **config):
        base.InLoopPollText.__init__(self, **config)
        self.add_defaults(KeyboardLayout.defaults)

    def button_press(self, x, y, button):
        """Cycle to the next configured layout on left click (button 1)."""
        if button == 1:
            self.next_keyboard()

    def next_keyboard(self):
        """Set the next layout in the list of configured keyboard layouts as
        new current layout in use

        If the current keyboard layout is not in the list, it will set as new
        layout the first one in the list.
        """
        current_keyboard = self.keyboard
        if current_keyboard in self.configured_keyboards:
            # iterate the list circularly
            next_keyboard = self.configured_keyboards[
                (self.configured_keyboards.index(current_keyboard) + 1) %
                len(self.configured_keyboards)]
        else:
            next_keyboard = self.configured_keyboards[0]
        self.keyboard = next_keyboard

    def poll(self):
        """Return the text shown in the bar: the layout name, uppercased."""
        return self.keyboard.upper()

    def get_keyboard_layout(self, setxkbmap_output):
        """Extract the layout name from `setxkbmap -verbose` output.

        Returns 'ERR' when the output contains no layout line.
        """
        matches = kb_regex.search(setxkbmap_output)
        if matches is None:
            return 'ERR'
        return matches.group('layout')

    @property
    def keyboard(self):
        """Return the currently used keyboard layout as a string

        Examples: "us", "us dvorak". In case of error returns "unknown".
        """
        try:
            command = 'setxkbmap -verbose 10'
            setxkbmap_output = self.call_process(command.split(' '))
            keyboard = self.get_keyboard_layout(setxkbmap_output)
            return str(keyboard)
        except CalledProcessError as e:
            logger.error('Can not get the keyboard layout: {0}'.format(e))
        except OSError as e:
            # BUG FIX: the command executed above is setxkbmap, but the
            # original message told the user to check that "xset" exists.
            logger.error('Please, check that setxkbmap is available: {0}'.format(e))
        return "unknown"

    @keyboard.setter
    def keyboard(self, keyboard):
        """Apply *keyboard* (e.g. "us dvorak") via setxkbmap."""
        command = ['setxkbmap']
        command.extend(keyboard.split(" "))
        try:
            subprocess.check_call(command)
        except CalledProcessError as e:
            logger.error('Can not change the keyboard layout: {0}'.format(e))
        except OSError as e:
            logger.error('Please, check that setxkbmap is available: {0}'.format(e))

    def cmd_next_keyboard(self):
        """Select next keyboard layout"""
        self.next_keyboard()
|
kxgames/seacow_economy_minigame | src/gui.py | Python | gpl-3.0 | 2,254 | 0.005768 | #!/usr/bin/env python3
import kxg
import pyglet
from .world import Player
from .messages import SetupPlayer, MakeInvestment
class Gui:
    """Owns the pyglet window, the draw batch and the ordered render groups."""

    def __init__(self):
        self.window = pyglet.window.Window()
        self.window.set_visible(True)
        self.batch = pyglet.graphics.Batch()
        self.texts = []
        # Draw order: background (0) under buttons (1) under text (2).
        groups = [pyglet.graphics.OrderedGroup(order) for order in range(3)]
        self.background_group, self.buttons_group, self.text_group = groups

    def on_refresh_gui(self):
        """Clear the window and redraw everything in the batch."""
        self.window.clear()
        self.batch.draw()

    def create_text(self, initial_message, x_coor, y_coor):
        """Create a text label at (x_coor, y_coor), track it, and return it."""
        label = pyglet.text.Label(
            initial_message,
            font_name='Arial',
            font_size=40,
            x=x_coor, y=y_coor,
            anchor_x='left', anchor_y='bottom',
            batch=self.batch, group=self.text_group,
        )
        self.texts.append(label)
        return label
c | lass GuiActor(kxg.Actor):
def __init__(self):
super().__init__()
self.player = Player()
def on_setup_gui(self, gui):
self.gui = gui
self.gui.window.set_handlers(self)
self.supply_label = self.gui.create_text('', 20, 280)
self.demand_label = self.gui.create_text('', 20, 220)
self.price_label = self.gui.create_text('', 20, 160)
self.wealth_label = self.gui.create_text('', 20, 80)
self.income_label = self.gui.create_text('', 20, 20)
def on_start_game(self, num_players):
self >> SetupPlayer(self.player)
self >> MakeInvestment(self.player.cities[0],
self.world.investment_tree['large grain farm'])
def on_draw(self):
grains = self.world.industry_tree['grains']
self.wealth_label.text = '${:.0f}'.format(self.player.wealth)
self.income_label.text = '${:.0f}/sec'.format(self.player.wealth_per_sec)
self.supply_label.text = 'Supply (grains): {:.0f}/sec'.format(self.player.cities[0].calculate_supply(grains))
self.demand_label.text = 'Demand (grains): {:.0f}/sec'.format(grains.calculate_demand())
self.price_label.text = 'Price (grains): ${:.0f}'.format(grains.calculate_price())
self.gui.on_refresh_gui()
|
bernard357/smart-video-counter | source/updater_mysql.py | Python | apache-2.0 | 2,420 | 0.000413 | # -*- coding: utf-8 -*-
"""Mysql module"""
import datetime
import logging
import MySQLdb as msql
class MysqlUpdater(object):
"""
Updates a database
"""
def __init__(self, settings=None):
"""
Sets updater settings
:param settings: the parameters for this updater
:type settings: ``dict``
"""
if settings is None:
settings = {}
self.settings = settings
def use_database(self):
"""
Opens a database
"""
pass
def reset_database(self):
"""
Recreates a database
"""
pass
def push(self, data):
"""
Pushes one update to the database
:param data: new record, e.g., "camera42 5 2 2"
:type data: ``str``
"""
try:
logging.debug('connecting to SQL store')
db = msql.connect(host=self.settings.get('host', 'localhost'),
user=self.settings.get('user', 'root'),
passwd=self.settings.get('password', 'root'),
db=self.settings.get('database', 'smart-video-counter'),
connect_timeout=2)
cursor = db.cursor()
sql_insert = ('INSERT INTO `{}`'
' (`id`,'
' `sender`,'
' `time_stamp`,'
' `standing`,'
' `moves`,'
' `faces`)'
' VALUES'
' (NULL,'
' %s,'
' %s,'
' %s,'
' %s,'
' %s)')
items = data.split(' ')
while len(items) < 4:
items.append('0')
| cursor.execute(sql_insert,
(items[0],
datetime.datetime.utcnow(),
int(items[1]),
int(items[2]),
int(items[3])))
db.commit()
cursor.close()
logging.debug('SQL store has been updated')
except Exception as feedback:
logging.warning('Warning: SQL store could not be updated')
logging | .error(str(feedback))
|
DataONEorg/d1_python | test_utilities/src/d1_test/instance_generator/tests/test_person.py | Python | apache-2.0 | 1,335 | 0 | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http | ://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import d1_test.d1_test_case
import d1_test.instance_generator.person
# ===================================== | ==========================================
@d1_test.d1_test_case.reproducible_random_decorator("TestPerson")
class TestPerson(d1_test.d1_test_case.D1TestCase):
def test_1000(self):
"""generate()"""
person_list = [
d1_test.instance_generator.person.generate().toxml("utf-8")
for _ in range(3)
]
self.sample.assert_equals(person_list, "inst_gen_person")
|
nicholasserra/sentry | src/sentry/utils/runner.py | Python | bsd-3-clause | 438 | 0 | #!/usr/bin/env python
"""
sentry.utils.runner
~~~~~~~~~~~~~~~~~~~
| :copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
# Backwards compatibility
from sentry.runner import configure, main # NOQA
import warnings
warnings.warn("'sentry.utils.runn | er' has moved to 'sentry.runner'",
DeprecationWarning)
|
abesto/fig | compose/cli/main.py | Python | apache-2.0 | 16,398 | 0.002012 | from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import re
import signal
from operator import attrgetter
from inspect import getdoc
import dockerpty
from .. import __version__
from ..project import NoSuchService, ConfigurationError
from ..service import BuildError, CannotBeScaledError
from .command import Command
from .formatter import Formatter
from .log_printer import LogPrinter
from .utils import yesno
from docker.errors import APIError
from .errors import UserError
from .docopt_command import NoSuchCommand
log = logging.getLogger(__name__)
def main():
setup_logging()
try:
command = TopLevelCommand()
command.sys_dispatch()
except KeyboardInterrupt:
log.error("\nAborting.")
sys.exit(1)
except (UserError, NoSuchService, ConfigurationError) as e:
log.error(e.msg)
sys.exit(1)
except NoSuchCommand as e:
log.error("No such command: %s", e.command)
log.error("")
log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
sys.exit(1)
except APIError as e:
log.error(e.explanation)
sys.exit(1)
except BuildError as e:
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
sys.exit(1)
def setup_logging():
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(logging.Formatter())
console_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.DEBUG)
# Disable requests logging
logging.getLogger("requests").propagate = False
# stolen from docopt master
def parse_doc_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
class TopLevelCommand(Command):
"""Fast, isolated development environments using Docker.
Usage:
docker-compose [options] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
--verbose Show more output
--version Print version and exit
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (def | ault: directory name)
Commands:
build Build or rebuild services
help Get help on a command
kill Kill containers
logs View output from containers
port Print the public port for a port binding
ps List containers
pull Pulls service images
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
start Start services
stop Stop se | rvices
restart Restart services
up Create and start containers
"""
def docopt_options(self):
options = super(TopLevelCommand, self).docopt_options()
options['version'] = "docker-compose %s" % __version__
return options
def build(self, project, options):
"""
Build or rebuild services.
Services are built once and then tagged as `project_service`,
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `compose build` to rebuild it.
Usage: build [options] [SERVICE...]
Options:
--no-cache Do not use cache when building the image.
"""
no_cache = bool(options.get('--no-cache', False))
project.build(service_names=options['SERVICE'], no_cache=no_cache)
def help(self, project, options):
"""
Get help on a command.
Usage: help COMMAND
"""
command = options['COMMAND']
if not hasattr(self, command):
raise NoSuchCommand(command, self)
raise SystemExit(getdoc(getattr(self, command)))
def kill(self, project, options):
"""
Force stop service containers.
Usage: kill [options] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
project.kill(service_names=options['SERVICE'], signal=signal)
def logs(self, project, options):
"""
View output from containers.
Usage: logs [options] [SERVICE...]
Options:
--no-color Produce monochrome output.
"""
containers = project.containers(service_names=options['SERVICE'], stopped=True)
monochrome = options['--no-color']
print("Attaching to", list_containers(containers))
LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run()
def port(self, project, options):
"""
Print the public port for a port binding.
Usage: port [options] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp (defaults to tcp)
--index=index index of the container if there are multiple
instances of a service (defaults to 1)
"""
service = project.get_service(options['SERVICE'])
try:
container = service.get_container(number=options.get('--index') or 1)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
def ps(self, project, options):
"""
List containers.
Usage: ps [options] [SERVICE...]
Options:
-q Only display IDs
"""
containers = sorted(
project.containers(service_names=options['SERVICE'], stopped=True) +
project.containers(service_names=options['SERVICE'], one_off=True),
key=attrgetter('name'))
if options['-q']:
for container in containers:
print(container.id)
else:
headers = [
'Name',
'Command',
'State',
'Ports',
]
rows = []
for container in containers:
command = container.human_readable_command
if len(command) > 30:
command = '%s ...' % command[:26]
rows.append([
container.name,
command,
container.human_readable_state,
container.human_readable_ports,
])
print(Formatter().table(headers, rows))
def pull(self, project, options):
"""
Pulls images for services.
Usage: pull [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
"""
insecure_registry = options['--allow-insecure-ssl']
project.pull(
service_names=options['SERVICE'],
insecure_registry=insecure_registry
)
def rm(self, project, options):
"""
Remove stopped service containers.
Usage: rm [options] [SERVICE...]
Options:
--force Don't ask to confirm removal
-v Remove volumes associated with containers
"""
all_containers = project.containers(service_names=options['SERVICE'], stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
if len(stopped_containers) > 0:
print("Going to remove", list_containers(stopped_containers))
if options.get('--force') \
or yesno("Are you sure? [yN] ", default=False):
project.remove_stopped(
service_names=options['SERVICE'],
v=options.get(' |
shacknetisp/fourthevaz | modules/core/api/__init__.py | Python | mit | 3,378 | 0.00148 | # -*- coding: utf-8 -*-
import configs.module
import wsgiref.simple_server
import select
import json
import bot
from urllib import parse
import irc.fullparse
import irc.splitparse
import os.path
def init(options):
m = configs.module.Module(__name__)
if 'wserver' in options['server'].state:
del options['server'].state['wserver']
try:
if 'apiport' in options['server'].entry:
options['server'].state[
'wserver'] = wsgiref.simple_server.make_server(
'', options['server'].entry['apiport'],
application(options['server']))
print(('Opening API server on %d' % options[
'server'].entry['apiport']))
except OSError:
print(('Unable to open API server on %d' % options[
'server'].entry['apiport']))
m.set_help('Access various bot functions from a json API.')
m.add_timer_hook(1 * 1000, timer)
m.add_base_hook('api.action.command', apiactioncommand)
m.add_base_hook('api.path.interface', apipathinterface)
return m
class application:
def __init__(self, server):
self.server = server
def __call__(self, environ, start_response):
ret = {
'status': 'error',
'message': 'unknown',
}
start_response('200 OK',
[('content-type', 'text/html;charset=utf-8')])
path = environ['PATH_INFO'].strip('/')
q = parse.parse_qs(environ['QUERY_STRING'])
action = q['action'][0] if 'action' in q else ''
try:
if path:
ret['message'] = 'unknown request'
ret['status'] = 'error'
self.server.do_bas | e_hook('api.path.%s' % path,
ret, self.server, q, environ)
else:
ret['message'] = 'invalid action'
ret['status'] = 'error'
self.server.do_base_hook('api.action.%s' % action,
ret, self.server, q, | environ)
if '_html' in ret:
return [ret['_html'].encode('utf-8')]
except KeyError:
pass
return [json.dumps(ret).encode('utf-8')]
def apiactioncommand(ret, server, q, environ):
del ret['message']
ip = environ['REMOTE_ADDR']
if 'command' not in q:
ret['message'] = 'no command'
ret['status'] = 'error'
if server.type == 'irc':
def process_message(i):
sp = irc.splitparse.SplitParser(i)
fp = irc.fullparse.FullParse(
server, sp, nomore=True)
return fp.execute(sp.text)
ret['output'] = process_message(
':%s!%s PRIVMSG %s :%s' % (':' + ip, "~api@" + ip,
server.nick,
q['command'][0],
))
elif server.type == 'file':
ret['output'] = server.fp(server, q['command'][0])
ret['status'] = 'good'
def apipathinterface(ret, server, q, environ):
del ret['message']
ret['_html'] = open(os.path.dirname(__file__) + '/interface.html').read()
ret['status'] = 'good'
def timer():
for server in bot.servers():
if 'wserver' not in server.state:
continue
wserver = server.state['wserver']
inr, _, _ = select.select([wserver], [], [], 0.01)
if inr:
wserver.handle_request() |
jledbetter/openhatch | mysite/profile/migrations/0088_add_field_portfolioentry_receive_maintainer_updates.py | Python | agpl-3.0 | 17,027 | 0.008398 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PortfolioEntry.receive_maintainer_updates'
db.add_column('profile_portfolioentry', 'receive_maintainer_updates', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'PortfolioEntry.receive_maintainer_updates'
db.delete_column('profile_portfolioentry', 'receive_maintainer_updates')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': | ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customs.webresponse': {
'Meta': {'object_name': 'WebResponse'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_headers': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields. | TextField', [], {})
},
'profile.citation': {
'Meta': {'object_name': 'Citation'},
'contributor_role': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'distinct_months': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'first_commit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_due_to_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'old_summary': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'portfolio_entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.PortfolioEntry']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'profile.dataimportattempt': {
'Meta': {'object_name': 'DataImportAttempt'},
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'web_response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.WebResponse']", 'null': 'True'})
},
'profile.forwarder': {
'Meta': {'object_name': 'Forwarder'},
'address': ('django.db.models.fields.TextField', [], {}),
'expires_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stops_being_listed_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'profile.link_person_tag': {
'Meta': {'object_name': 'Link_Person_Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_project_tag': {
'Meta': {'object_name': 'Link_Project_Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], |
tchellomello/home-assistant | homeassistant/components/songpal/__init__.py | Python | apache-2.0 | 1,582 | 0.002528 | """The songpal component."""
from collections import OrderedDict
import logging
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import CONF_ENDPOINT, DOMAIN
_LOGGER = logging.getLogger(__name__)
SONGPAL_CONFIG_SCHEMA = vol.Schema(
{vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_ENDPOINT): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): vol.All(cv.ensure_list, [SONGPAL_CONFIG_SCHEMA])},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAss | istantType, config: OrderedDict) -> bool:
"""Set up songpal environment."""
conf = config.get(DOMAIN)
if conf is None:
return True
for config_entry in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IM | PORT},
data=config_entry,
),
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up songpal media player."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "media_player")
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload songpal media player."""
return await hass.config_entries.async_forward_entry_unload(entry, "media_player")
|
Rohit4198/Calendargui | main.py | Python | mit | 4,749 | 0.003159 |
import Tkinter
import calendar
import time
import tkFont
import ttk
def sequence(*functions): # to run 2 or more functions on button click
for function in functions:
function()
def update(y, m, tx, curdate): # generate calendar with right colors
calstr = calendar.month(y, m)
tx.configure(state=Tkinter.NORMAL)
tx.delete('0.0', Tkinter.END) # remove previous calendar
tx.insert(Tkinter.INSERT, calstr)
for i in range(2, 9):
tx.tag_add("others", '{}.0'.format(i), '{}.end'.format(i)) # tag days for coloring
if len(tx.get('{}.0'.format(i), '{}.end'.format(i))) == 20:
tx.tag_add("sun", '{}.end-2c'.format(i), '{}.end'.format(i))
tx.tag_config("sun", foreground="#FB465E")
tx.tag_config("others", foreground="#000000")
tx.tag_add("head", '1.0', '1.end')
if curdate[0] == y and curdate[1] == m:
index = tx.search(str(curdate[2]), '2.0') # search for today's date
tx.tag_add("cur", index, '{}+2c'.format(index)) # highlight today's date
tx.tag_config("cur", background="blue", foreground="white")
tx.tag_config("head", font=segoe, foreground="#8B0000", justify=Tk | inter.CENTER)
tx.configure(state=Tkinter.DISABLED) # make text view not editable
top = Tkinter.Tk()
top.title("Calendar")
top.minsize(200, 250)
top.maxsize(200, 200)
logo = Tkinter.PhotoImage(file="r.gif")
top.tk.call('wm', 'iconphoto', top._w, logo)
segoe = t | kFont.Font(family='Segoe UI')
curtime = time.localtime()
year = Tkinter.StringVar()
month = Tkinter.StringVar()
yearInt = curtime[0]
monthInt = curtime[1]
dateInt = curtime[2]
HLayout = ttk.PanedWindow(top, orient=Tkinter.HORIZONTAL)
ctx = Tkinter.Text(top, padx=10, pady=10, bg="#E6E6FA", relief=Tkinter.FLAT, height=9,
width=20) # text view to passing to functions
def nextb(): # on click next button
global monthInt, yearInt, ctx, curtime
monthInt += 1
if monthInt > 12:
monthInt = monthInt % 12
yearInt += 1
update(yearInt, monthInt, ctx, curtime)
def prevb(): # on click previous button
global monthInt, yearInt, ctx, curtime
monthInt -= 1
if monthInt < 1:
monthInt = 12
yearInt -= 1
update(yearInt, monthInt, ctx, curtime)
def okcall(): # ok button click inside go to date window
global monthInt, yearInt, ctx, curtime
if (year.get().isdigit() and month.get().isdigit()) and (
(0 < int(year.get()) < 10000) and (0 < int(month.get()) < 13)):
yearInt = int(year.get())
monthInt = int(month.get())
update(yearInt, monthInt, ctx, curtime)
def gotod(): # go to date window creation
newtop = Tkinter.Toplevel()
newtop.title("Calendar")
newtop.maxsize(190, 190)
newtop.focus_set()
newtop.tk.call('wm', 'iconphoto', newtop._w, logo)
HLayout = ttk.PanedWindow(newtop, orient=Tkinter.HORIZONTAL)
HLayout2 = ttk.PanedWindow(newtop, orient=Tkinter.HORIZONTAL)
yearText = ttk.Label(HLayout, text="Year :")
yearEdit = ttk.Entry(HLayout, textvariable=year)
monthText = ttk.Label(HLayout2, text="Month:")
monthEdit = ttk.Entry(HLayout2, textvariable=month)
okb = ttk.Button(newtop, text="Ok", command=lambda: sequence(okcall, newtop.destroy))
yearText.pack(side=Tkinter.LEFT)
yearEdit.pack(side=Tkinter.RIGHT)
monthText.pack(side=Tkinter.LEFT)
monthEdit.pack(side=Tkinter.RIGHT)
HLayout.pack()
HLayout2.pack()
okb.pack()
newtop.mainloop()
def about_show(): # about window creation
newtop = Tkinter.Toplevel()
newtop.title("Calendar")
newtop.maxsize(190, 190)
newtop.focus_set()
newtop.tk.call('wm', 'iconphoto', newtop, logo)
about = ttk.LabelFrame(newtop, text="About")
Tkinter.Label(about, text="Calendar Gui").pack()
Tkinter.Label(about, text="Developer: Rohit Rane", compound=Tkinter.BOTTOM).pack()
about.pack()
newtop.mainloop()
update(yearInt, monthInt, ctx, curtime) # for first run, generate calendar
prev = ttk.Button(HLayout, text="<<", command=prevb)
nex = ttk.Button(HLayout, text=">>", command=nextb)
goto = ttk.Button(top, text="Goto", command=gotod)
menubar = Tkinter.Menu(top, relief=Tkinter.FLAT)
filemenu = Tkinter.Menu(menubar, tearoff=0, relief=Tkinter.FLAT)
helpmenu = Tkinter.Menu(menubar, tearoff=0, relief=Tkinter.FLAT)
filemenu.add_command(label="Goto", command=gotod)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=top.destroy)
helpmenu.add_command(label="About", command=about_show)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Help", menu=helpmenu)
top.config(menu=menubar)
prev.pack(side=Tkinter.LEFT)
nex.pack(side=Tkinter.RIGHT)
ctx.pack()
HLayout.pack()
goto.pack()
top.mainloop()
|
maxive/erp | addons/website_hr_recruitment/tests/test_website_hr_recruitment.py | Python | agpl-3.0 | 830 | 0.003614 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.api import Environment
import odoo.tests
@odoo.tests.tagged('post_install', '-at | _install')
class TestWebsiteHrRecruitmentForm(odoo.tests.HttpCase):
def test_tour(self):
self.phantom_js("/", "odoo.__DEBUG__.services['web_tour.tour'].run('website_hr_recruitment_tour')", "odoo.__DEBUG__.services['web_tour.tour'].tours.website_hr_recruitment_tour.ready")
# check result
record = self.env['hr.applicant'].search([('description', | '=', '### HR RECRUITMENT TEST DATA ###')])
self.assertEqual(len(record), 1)
self.assertEqual(record.partner_name, "John Smith")
self.assertEqual(record.email_from, "john@smith.com")
self.assertEqual(record.partner_phone, '118.218')
|
ygol/odoo | addons/website_blog/models/website.py | Python | agpl-3.0 | 2,036 | 0.002456 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
from odoo.addons.http_routing.models.ir_http import url_for
class Website(models.Model):
_inherit = "website"
@api.model
def page_search_dependencies(self, page_id=False):
dep = super(Website, self).page_search_depe | ndencies(page_id=page_id)
page = self.env['website.page'].browse(int(page_id))
path = page.url
dom = [
('content', 'ilike', path)
]
posts = self.env['blog.post'].search(dom)
if posts:
page_key = _('Blog Post')
if len(posts) > 1:
page_ | key = _('Blog Posts')
dep[page_key] = []
for p in posts:
dep[page_key].append({
'text': _('Blog Post <b>%s</b> seems to have a link to this page !') % p.name,
'item': p.name,
'link': p.website_url,
})
return dep
@api.model
def page_search_key_dependencies(self, page_id=False):
dep = super(Website, self).page_search_key_dependencies(page_id=page_id)
page = self.env['website.page'].browse(int(page_id))
key = page.key
dom = [
('content', 'ilike', key)
]
posts = self.env['blog.post'].search(dom)
if posts:
page_key = _('Blog Post')
if len(posts) > 1:
page_key = _('Blog Posts')
dep[page_key] = []
for p in posts:
dep[page_key].append({
'text': _('Blog Post <b>%s</b> seems to be calling this file !') % p.name,
'item': p.name,
'link': p.website_url,
})
return dep
def get_suggested_controllers(self):
suggested_controllers = super(Website, self).get_suggested_controllers()
suggested_controllers.append((_('Blog'), url_for('/blog'), 'website_blog'))
return suggested_controllers
|
elbeardmorez/quodlibet | quodlibet/tests/test_po.py | Python | gpl-2.0 | 11,377 | 0.000176 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests import TestCase, skipUnless
from tests.helper import ListWithUnused as L
import os
import re
import pytest
try:
import polib
except ImportError:
polib = None
import quodlibet
from quodlibet.util import get_module_dir
from quodlibet.util.string.titlecase import human_title
from gdist import gettextutil
QL_BASE_DIR = os.path.dirname(get_module_dir(quodlibet))
PODIR = os.path.join(QL_BASE_DIR, "po")
def has_gettext_util():
try:
gettextutil.check_version()
except gettextutil.GettextError:
return False
return True
class MissingTranslationsException(Exception):
def __init__(self, missing):
msg = ("No reference in POTFILES.in to: " +
", ".join(missing))
super(MissingTranslationsException, self).__init__(msg)
@pytest.mark.skipif(not has_gettext_util(), reason="no gettext")
def test_potfile_format():
with gettextutil.create_pot(PODIR, "quodlibet"):
gettextutil.check_pot(PODIR, "quodlibet")
class TPOTFILESIN(TestCase):
def test_no_extra_entries(self):
"""Works without polib installed..."""
with open(os.path.join(PODIR, "POTFILES.in")) as f:
for fn in f:
path = os.path.join(QL_BASE_DIR, fn.strip())
assert os.path.isfile(path), \
"Can't read '%s' from POTFILES.in" % path
def test_missing(self):
try:
gettextutil.check_version()
except gettextutil.GettextError:
return
results = gettextutil.get_missing(PODIR, "quodlibet")
if results:
raise MissingTranslationsException(results)
@skipUnless(polib, "polib not found")
class TPot(TestCase):
    """Quality checks on the gettext POT template built from the sources.

    The template is generated once in setUpClass; every test then scans
    all msgids and reports offending entries together with the source
    locations they occur at.
    """

    @classmethod
    def setUpClass(cls):
        # Building the POT file is expensive, so do it once for the class.
        gettextutil.check_version()
        with gettextutil.create_pot(PODIR, "quodlibet") as pot_path:
            cls.pot = polib.pofile(pot_path)

    def conclude(self, fails, reason):
        # Common failure reporter: list each offending msgid together with
        # the file:line occurrences recorded in the POT entry.
        if fails:
            def format_occurrences(e):
                return ', '.join('%s:%s' % o for o in e.occurrences)
            messages = [
                "'%s' (%s)" % (e.msgid, format_occurrences(e)) for e in fails
            ]
            self.fail(
                "One or more messages did not pass (%s):\n" % reason
                + '\n'.join(messages))

    def test_multiple_format_placeholders(self):
        # Translators cannot reorder anonymous placeholders such as '%s'
        # or '{}', so flag messages containing more than one of them.
        fails = []
        reg = re.compile(r"((?<!%)%[sbcdoxXneEfFgG]|\{\})")
        for entry in self.pot:
            if len(reg.findall(entry.msgid)) > 1:
                fails.append(entry)
        self.conclude(fails,
                      "uses multiple non-named format placeholders")

    def test_label_capitals(self):
        """ Check that various input labels (strings ending with a ':') are
            written with proper capitalization.

            Examples:
                Dough amount: - ok
                Salt: - ok
                Channel eggs through the Internet: - ok
                All Caps: - title case can't be used for labels

            Caveats:
                The test doesn't yet know which words are usually capitalized, so:
                Send to Kitchen: - will erroneously pass the test
        """
        fails = []
        # Labels that legitimately look title-cased and must not fail.
        ok_labels = L('Local _IP:', 'Songs with MBIDs:')
        for entry in self.pot:
            if not entry.msgid.endswith(':'):
                continue
            # Single-word labels cannot be in title case anyway.
            if ' ' not in entry.msgid.strip():
                continue
            if entry.msgid == human_title(entry.msgid):
                if entry.msgid not in ok_labels:
                    fails.append(entry)
        ok_labels.check_unused()
        self.conclude(fails, "title case used for a label")

    def test_whitespace(self):
        """ Check that there are no more than 1 space character ' ' in a row.

            Examples:
                "Quod Libet" - ok
                "Quod  Libet" - extra whitespace
                "Traceback:\n    <snip>" - ok

            Caveats:
                linebreaks and presumably other special characters in the messages
                are stored as literals, so when matching them with regular
                expressions, don't forget to use double backslash.
        """
        fails = []
        regex = re.compile(r'[^\\n] {2,}')
        for entry in self.pot:
            if regex.findall(entry.msgid):
                fails.append(entry)
        self.conclude(fails, "extra whitespace")

    def test_punctuation(self):
        """ Check that punctuation marks are used properly.

            Examples:
                Hello! - ok
                Hello ! - extra whitespace
                HH:MM:SS - ok
                example.com - ok
                Open .tags file - ok
                Hello,world - missing whitespace
        """
        fails = []
        regex = re.compile(r'\s[.,:;!?](?![a-z])|'
                           r'[a-z](?<!people)[,:;][a-zA-Z]')
        for entry in self.pot:
            if regex.findall(entry.msgid):
                fails.append(entry)
        self.conclude(fails, "check punctuation")

    def test_ellipsis(self):
        # https://wiki.gnome.org/Initiatives/GnomeGoals/UnicodeUsage
        for entry in self.pot:
            self.assertFalse(
                "..." in entry.msgid,
                msg=u"%s should use '…' (ELLIPSIS) instead of '...'" % entry)

    def test_markup(self):
        # https://wiki.gnome.org/Initiatives/GnomeGoals/RemoveMarkupInMessages
        fails = []
        for entry in self.pot:
            # This only checks strings starting and ending with a tag.
            # TODO: fix for all cases by adding a translator comment
            # and insert
            if re.match("<.*?>.*</.*?>", entry.msgid):
                fails.append(entry)
        self.conclude(fails, "contains markup, remove it!")

    def test_terms_letter_case(self):
        """ Check that some words are always written with a specific
            combination of lower and upper case letters.

            Examples:
                MusicBrainz - ok
                musicbrainz - lower case letters
                musicbrainz_track_id - ok
                musicbrainz.org - ok
        """
        terms = (
            'AcoustID', 'D-Bus', 'Ex Falso', 'GNOME', 'GStreamer', 'Internet',
            'iPod', 'Last.fm', 'MusicBrainz', 'Python', 'Quod Libet',
            'Replay Gain', 'ReplayGain', 'Squeezebox', 'Wikipedia')
        ok_suffixes = ('_', '.org')
        fails = []
        for entry in self.pot:
            for term in terms:
                if term.lower() not in entry.msgid.lower():
                    continue
                # Skip technical identifiers (musicbrainz_track_id) and
                # domain names (musicbrainz.org) via the allowed suffixes.
                i = entry.msgid.lower().find(term.lower())
                if entry.msgid[i + len(term):].startswith(ok_suffixes):
                    continue
                if term not in entry.msgid:
                    fails.append(entry)
        self.conclude(fails, "incorrect letter case for a term")

    def test_terms_spelling(self):
        """ Check if some words are misspelled. Some of the words are already
            checked in test_terms_letter_case, but some misspellings include
            not only letter case.

            Examples:
                Last.fm - ok
                LastFM - common misspelling
        """
        incorrect_terms = ('Acoustid.org', 'ExFalso', 'LastFM', 'QuodLibet')
        fails = []
        for entry in self.pot:
            for term in incorrect_terms:
                if term in entry.msgid:
                    fails.append(entry)
        self.conclude(fails, "incorrect spelling for a term")

    def test_leading_and_trailing_spaces(self):
        # msgids must not carry accidental surrounding whitespace.
        fails = []
        for entry in self.pot:
            if entry.msgid.strip() != entry.msgid:
                fails.append(entry)
        self.conclude(fails, "leading or trailing spaces")
class POMixin(object):
def test_pos(self):
try:
gettextutil.check_version()
except gettextutil.GettextError:
return
|
inuitwallet/plunge | client/client.py | Python | mit | 21,310 | 0.004927 | #! /usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2015 creon (creon.nu@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import time
import json
import tempfile
import signal
import subprocess
import threading
import logging
import logging.handlers
import socket
from math import ceil
from thread import start_new_thread
from exchanges import *
from trading import *
from utils import *
_wrappers = {'bittrex': Bittrex, 'ccedk': CCEDK, 'bitcoincoid': BitcoinCoId, 'bter': BTER, 'testing': Peatio}
_mainlogger = None
def getlogger():
    """Return the shared 'Client' logger, creating and configuring it once.

    The logger forwards DEBUG output to the default TCP logging port and a
    timestamped file under ``logs/``, and prints INFO and above on the
    console.  Subsequent calls return the already-configured instance.
    """
    global _mainlogger
    if _mainlogger:
        return _mainlogger
    if not os.path.isdir('logs'):
        os.makedirs('logs')
    _mainlogger = logging.getLogger('Client')
    _mainlogger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s', datefmt="%Y/%m/%d-%H:%M:%S")
    handler_specs = (
        (logging.handlers.SocketHandler('', logging.handlers.DEFAULT_TCP_LOGGING_PORT), logging.DEBUG),
        (logging.FileHandler('logs/%d.log' % time.time()), logging.DEBUG),
        (logging.StreamHandler(), logging.INFO),
    )
    for handler, level in handler_specs:
        handler.setLevel(level)
        handler.setFormatter(formatter)
        _mainlogger.addHandler(handler)
    return _mainlogger
# one request signer thread for each key and unit
class RequestThread(ConnectionThread):
    """Signs and submits liquidity requests for one (API key, unit) pair.

    The thread registers the key with the server and then POSTs a signed
    exchange request roughly ``sampling`` times per minute (the run loop
    sleeps ``60.0 / sampling`` seconds between submissions).
    """
    def __init__(self, conn, key, secret, exchange, unit, address, sampling, cost, logger=None):
        super(RequestThread, self).__init__(conn, logger)
        self.key = key
        self.secret = secret
        self.exchange = exchange
        self.unit = unit
        self.initsampling = sampling  # remembered so register() can reset the rate
        self.sampling = sampling
        self.address = address
        self.errorflag = False  # set once failures accumulate for ~2 minutes
        self.trials = 0  # accumulated seconds of consecutive submit failures
        self.exchangeupdate = 0
        self.cost = cost.copy()  # own copy of the {'bid': ..., 'ask': ...} rates
    def register(self):
        # Announce this (address, key, exchange) combination to the server.
        response = self.conn.post('register', {'address': self.address, 'key': self.key, 'name': repr(self.exchange)},
                                  trials=3, timeout=10)
        if response['code'] == 0:  # reset sampling in case of server restart
            self.sampling = self.initsampling
        return response
    def submit(self):
        # Build the exchange-specific signed payload and send it as proof
        # of liquidity, merging in the configured cost rates.
        data, sign = self.exchange.create_request(self.unit, self.key, self.secret)
        params = {'unit': self.unit, 'user': self.key, 'sign': sign}
        params.update(data)
        params.update(self.cost)
        curtime = time.time()
        ret = self.conn.post('liquidity', params, trials=1, timeout=60)
        if ret['code'] != 0:
            # Count the elapsed time plus one sampling period as failure time.
            self.trials += time.time() - curtime + 60.0 / self.sampling
            self.logger.error("submit: %s" % ret['message'])
            if ret['code'] == 11:  # user unknown, just register again
                self.register()
        else:
            self.trials = 0
        self.errorflag = self.trials >= 120  # notify that something is wrong after 2 minutes of failures
    def run(self):
        ret = self.register()
        if ret['code'] != 0: self.logger.error("register: %s" % ret['message'])
        while self.active:
            curtime = time.time()
            # Submit asynchronously so a slow request does not delay the schedule.
            start_new_thread(self.submit, ())
            time.sleep(max(60.0 / self.sampling - time.time() + curtime, 0))
# actual client class which contains several (key,unit) pairs
class Client(ConnectionThread):
    def __init__(self, server, logger=None):
        """Connect to the liquidity server and read its base configuration."""
        self.logger = getlogger() if not logger else logger
        self.conn = Connection(server, logger)
        super(Client, self).__init__(self.conn, self.logger)
        self.basestatus = self.conn.get('status')
        self.exchangeinfo = self.conn.get('exchanges')
        # Submit at four times the server sampling rate, capped at 240
        # submissions per minute (RequestThread sleeps 60.0 / sampling).
        self.sampling = min(240, 4 * self.basestatus['sampling'])
        # {api_key: {unit: {'request': RequestThread, 'order': bot or None}}}
        self.users = {}
        self.lock = threading.Lock()  # guards self.users mutations in set()
def set(self, key, secret, address, name, unit, bid=None, ask=None, bot='pybot', ordermatch=False):
if not name in self.exchangeinfo or not unit in self.exchangeinfo[name]:
return False
key = str(key)
secret = str(secret)
if isinstance(_wrappers[name], type):
_wrappers[name] = _wrappers[name]()
exchange = _wrappers[name]
cost = {'bid': bid if bid else self.exchangeinfo[name][unit]['bid']['rate'],
'ask': ask if ask else self.exchangeinfo[name][unit]['ask']['rate']}
self.lock.acquire()
if not key in self.users:
self.users[key] = {}
if unit in self.users[key]:
self.shutdown(key, unit)
self.users[key][unit] = {
'request': RequestThread(self.conn, key, secret, exchange, unit, address, self.sampling, cost, self.logger)}
self.users[key][unit]['request'].start()
target = {'bid': self.exchangeinfo[name][unit]['bid']['target'],
'ask': self.exchangeinfo[name][unit]['ask']['target']}
if not bot or bot == 'none':
self.users[key][unit]['order'] = None
elif bot == 'nubot':
self.users[key][unit]['order'] = NuBot(self.conn, self.users[key][unit]['request'], key, secret, exchange,
unit, target, self.logger, ordermatch)
elif bot == 'pybot':
self.users[key][unit]['order'] = PyBot(self.conn, self.users[key][unit]['request'], key, secret, exchange,
unit, target, self.logger, ordermatch)
else:
self.logger.error("unknown order handler: %s", bot)
self.users[key][unit]['order'] = None
if self.users[key][unit]['order']:
if self.users[key][unit]['order']:
self.users[key][unit]['order'].start()
self.lock.release()
return True
    def shutdown(self, key=None, unit=None, join=True):
        """Stop request/order threads.

        With no arguments every (key, unit) pair is shut down; with only
        ``key`` all units of that key are.  When ``join`` is true the
        threads are first all asked to stop (join=False pass) and then
        joined in a second pass.
        """
        if key == None:
            # Fan out over every key: stop everything first, then join.
            for key in self.users:
                self.shutdown(key, unit, False)
            if join:
                for key in self.users:
                    self.shutdown(key, unit, True)
        elif unit == None:
            # Fan out over all units of this key, same two-pass scheme.
            for unit in self.users[key]:
                self.shutdown(key, unit, False)
            if join:
                for unit in self.users[key]:
                    self.shutdown(key, unit, True)
        else:
            # Retry on Ctrl-C so a shutdown cannot be aborted halfway through.
            while True:
                try:
                    self.users[key][unit]['request'].stop()
                    if self.users[key][unit]['order']:
                        self.users[key][unit]['order'].stop()
                    if join:
                        self.users[key][unit]['request'].join()
                        if self.users[key][unit]['order']:
                            self.users[key][unit]['order'].join()
                except KeyboardInterrupt:
                    continue
                break
def run(self):
starttime = time.time()
curtime = time.time()
efficiencies = []
while self.active:
sleep = 60 - time.time() + curtime
whil |
edisonlz/fruit | web_project/base/site-packages/grappelli/dashboard/dashboards.py | Python | apache-2.0 | 6,499 | 0.004462 | """
Module where admin tools dashboard classes are defined.
"""
from django.template.defaultfilters import slugify
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from grappelli.dashboard import modules
from grappelli.dashboard.utils import get_admin_site_name
from django import forms
class Dashboard(object):
    """
    Base class for admin dashboards.

    A dashboard is essentially a list of modules plus three properties:

    ``title``
        Title displayed above the dashboard (default: 'Dashboard').

    ``template``
        Template used to render the dashboard
        (default: 'grappelli/dashboard/dashboard.html').

    ``columns``
        Number of columns in the dashboard layout (default: 2).

    Custom stylesheets and javascript (relative to your media path) can be
    attached through an inner ``Media`` class, for example::

        from admin_tools.dashboard import Dashboard

        class MyDashboard(Dashboard):
            class Media:
                css = {
                    'all': ('css/mydashboard.css', 'css/mystyles.css'),
                }
                js = ('js/mydashboard.js', 'js/myscript.js')

    A complete custom dashboard could look like::

        from django.utils.translation import ugettext_lazy as _
        from admin_tools.dashboard import modules, Dashboard

        class MyDashboard(Dashboard):
            # we want a 3 columns layout
            columns = 3

            def __init__(self, **kwargs):
                self.children.append(modules.AppList(
                    title=_('Applications'),
                    exclude=('django.contrib.*',),
                ))
                self.children.append(modules.AppList(
                    title=_('Administration'),
                    models=('django.contrib.*',),
                ))
                self.children.append(modules.RecentActions(
                    title=_('Recent Actions'),
                    limit=5
                ))
    """
    # Django's media-defining metaclass collects the inner ``Media`` class
    # of subclasses (Python 2 style metaclass declaration).
    __metaclass__ = forms.MediaDefiningClass

    def _media(self):
        return forms.Media()
    media = property(_media)

    title = _('Dashboard')
    template = 'grappelli/dashboard/dashboard.html'
    columns = 2
    children = None

    def __init__(self, **kwargs):
        # Any declared class attribute may be overridden per instance.
        for key, value in kwargs.items():
            if hasattr(self.__class__, key):
                setattr(self, key, value)
        self.children = self.children or []

    def init_with_context(self, context):
        """
        Hook invoked just before rendering with a
        ``django.template.RequestContext`` as its unique argument, so the
        dashboard can be built from context variables and the
        ``django.http.HttpRequest``.
        """
        pass

    def get_id(self):
        """
        Internal identifier used to distinguish dashboards in js code.
        """
        return 'dashboard'
class DefaultIndexDashboard(Dashboard):
    """
    Default dashboard shown on the admin index page.

    To replace it, generate a custom dashboard with::

        python manage.py customdashboard

    and point the ``GRAPPELLI_INDEX_DASHBOARD`` setting at the custom
    dashboard class.
    """
    def init_with_context(self, context):
        site_name = get_admin_site_name(context)
        # Modules are appended in display order.
        self.children += [
            # "quick links" inline link list
            modules.LinkList(
                _('Quick links'),
                layout='inline',
                draggable=False,
                deletable=False,
                collapsible=False,
                children=[
                    [_('Return to site'), '/'],
                    [_('Change password'),
                     reverse('%s:password_change' % site_name)],
                    [_('Log out'), reverse('%s:logout' % site_name)],
                ]
            ),
            # every application except django.contrib
            modules.AppList(
                _('Applications'),
                exclude=('django.contrib.*',),
            ),
            # the django.contrib applications only
            modules.AppList(
                _('Administration'),
                models=('django.contrib.*',),
            ),
            # latest admin log entries
            modules.RecentActions(_('Recent Actions'), 5),
            # Django news feed
            modules.Feed(
                _('Latest Django News'),
                feed_url='http://www.djangoproject.com/rss/weblog/',
                limit=5
            ),
            # external support links
            modules.LinkList(
                _('Support'),
                children=[
                    {
                        'title': _('Django documentation'),
                        'url': 'http://docs.djangoproject.com/',
                        'external': True,
                    },
                    {
                        'title': _('Django "django-users" mailing list'),
                        'url': 'http://groups.google.com/group/django-users',
                        'external': True,
                    },
                    {
                        'title': _('Django irc channel'),
                        'url': 'irc://irc.freenode.net/django',
                        'external': True,
                    },
                ]
            ),
        ]
|
from .interfaces import IWheel

__author__ = 'sighalt'


class MichelinWheel(IWheel):
    """Example concrete wheel; adds no behaviour beyond the IWheel interface."""
    pass
dougnd/matplotlib2tikz | test/testfunctions/image_plot.py | Python | mit | 753 | 0 | # -*- coding: utf-8 -*-
#
desc = 'An \\texttt{imshow} plot'
phash = '7558d3b30f634b06'
def plot():
    """Render lena.png through ``imshow`` with an HSV colorbar; return the figure."""
    import os

    from matplotlib import pyplot as pp
    from matplotlib import rcParams
    try:
        from PIL import Image
    except ImportError:
        raise RuntimeError('PIL must be installed to run this example')
    this_dir = os.path.dirname(os.path.realpath(__file__))
    image = Image.open(os.path.join(this_dir, 'lena.png'))
    # Size the figure so one image pixel maps to one figure pixel.
    dpi = rcParams['figure.dpi']
    width, height = image.size
    fig = pp.figure(figsize=(width / dpi, height / dpi))
    axes = pp.axes([0, 0, 1, 1], frameon=False)
    axes.set_axis_off()
    pp.imshow(image, origin='lower')
    # Set the current color map to HSV.
    pp.hsv()
    pp.colorbar()
    return fig
|
"""
Build a CaptureGameNetwork with LSTM cells

>>> from pybrain.structure.networks.custom import CaptureGameNetwork
>>> from pybrain import MDLSTMLayer
>>> size = 2
>>> n = CaptureGameNetwork(size = size, componentclass = MDLSTMLayer, hsize = 1, peepholes = False)

Check it's string representation

>>> print(n)
CaptureGameNetwork-s2-h1-MDLSTMLayer--...
Modules:
[<BiasUnit 'bias'>, <LinearLayer 'input'>, <MDLSTMLayer 'hidden(0, 0, 0)'>, ... <MDLSTMLayer 'hidden(0, 0, 3)'>, <SigmoidLayer 'output'>]
Connections:
[<IdentityConnection ...

Check some of the connections dimensionalities

>>> c1 = n.connections[n['hidden(1, 0, 3)']][0]
>>> c2 = n.connections[n['hidden(0, 1, 2)']][-1]
>>> print((c1.indim, c1.outdim))
(1, 1)
>>> print((c2.indim, c2.outdim))
(1, 1)
>>> n.paramdim
21

Try writing it to an xml file, reread it and determine if it looks the same:

>>> from pybrain.tests import xmlInvariance
>>> xmlInvariance(n)
Same representation
Same function
Same class

Check its gradient:

>>> from pybrain.tests import gradientCheck
>>> gradientCheck(n)
Perfect gradient
True

"""

__author__ = 'Tom Schaul, tom@idsia.ch'

from pybrain.tests import runModuleTestSuite

# Run the doctests above when this file is executed directly.
if __name__ == '__main__':
    runModuleTestSuite(__import__('__main__'))
|
from django.contrib import admin

from frozenflower.frontend.models import *

# Register the frontend models so they are editable in the Django admin
# with the default ModelAdmin options.
admin.site.register(Tag)
admin.site.register(Article)
admin.site.register(Comment)
admin.site.register(Feed)
admin.site.register(Repository)
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/idlexlib/idlexMain.py | Python | gpl-3.0 | 12,959 | 0.005093 | #! /usr/bin/env python
## """
## Copyright(C) 2011 The Board of Trustees of the University of Illinois.
## All rights reserved.
##
## Developed by: Roger D. Serwy
## University of Illinois
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal with the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## + Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimers.
## + Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimers in the
## documentation and/or other materials provided with the distribution.
## + Neither the names of Roger D. Serwy, the University of Illinois, nor
## the names of its contributors may be used to endorse or promote
## products derived from this Software without specific prior written
## permission.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
## CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
## THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
##
## """
# This module hotpatches EditorWindow.py to load idlex extensions properly
from __future__ import print_function
import sys
from idlexlib.extensionManager import extensionManager
import idlelib
import os
import __main__
import imp
import traceback
import re
from idlelib import macosxSupport
version = "1.12" # IdleX version
IDLE_DEFAULT_EXT = [] # list of default extensions that IDLE has
if sys.version < '3':
from StringIO import StringIO
from Tkinter import *
import Tkinter as tkinter
import tkFileDialog
import tkMessageBox
else:
from io import StringIO
from tkinter import *
import tkinter
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
from idlelib.configHandler import idleConf, IdleConfParser
# Matches ANSI SGR escape sequences (optionally wrapped in the \x01/\x02
# readline "non-printing" markers).
ansi_re = re.compile(r'\x01?\x1b\[(.*?)m\x02?')


def strip_ansi(s):
    """Return *s* with ANSI SGR color escape sequences removed."""
    return re.sub(ansi_re, '', s)
def install_idlex_manager():
    """Install the IdleX extension manager into IDLE.

    Prefixes every user configuration file name with 'idlex-' and reloads
    it (working around http://bugs.python.org/issue13582, where an error
    in the shared config made pythonw.exe refuse to run idle.pyw on
    Windows), then loads the idlexManager extension and marks it enabled
    in the user's extension configuration.
    """
    for cfg in list(idleConf.userCfg.values()):
        # Rename the config file to an "idlex-" prefixed name (only once)
        # and reload it from the new location.
        directory, filename = os.path.split(cfg.file)
        if not filename.startswith('idlex-'):
            filename = 'idlex-' + filename
        cfg.file = os.path.join(directory, filename)
        cfg.Load()
    manager_mod = extensionManager.load_extension('idlexManager')
    manager_mod.extensionManager = extensionManager
    manager_mod.version = version
    manager_mod.update_globals()
    # Make sure idlexManager appears enabled in the extension list.
    ext_cfg = idleConf.userCfg['extensions']
    if not ext_cfg.has_section('idlexManager'):
        ext_cfg.add_section('idlexManager')
    ext_cfg.set('idlexManager', 'enable', '1')
def _printExt():
    """Print the configured IDLE extensions, skipping key-binding sections."""
    names = [section for section in idleConf.defaultCfg['extensions'].sections()
             if not section.endswith(('_cfgBindings', '_bindings'))]
    print('Extensions: %s' % names)
###########################################################################
##
## HOTPATCHING CODE
##
###########################################################################
def fix_tk86():
    """Monkeypatch tkinter.Tk to cope with Tcl/Tk 8.6.

    Under Tcl 8.6 the low-level interpreter can hand Tcl_Obj instances to
    Python where older versions returned plain strings; the proxy below
    coerces such arguments to str for the ``splitlist`` call.
    """
    # Keep a reference to the original Tk class before replacing it below.
    tkinter._Tk = tkinter.Tk
    def wrapper(func, name):
        # Wrap an interpreter method so every Tcl_Obj argument (positional
        # or keyword) is converted to its string form before the call.
        Tcl_Obj = tkinter._tkinter.Tcl_Obj
        def f(*args, **kwargs):
            #print(name, 'wrapped', args, kwargs)
            #t = [i for i in args if isinstance(i, Tcl_Obj)]
            #for i in t:
            #    print(name, 'FOUND arg:', repr(i), type(i), str(i))
            args = [i if not isinstance(i, Tcl_Obj) else str(i)
                    for i in args]
            for key, value in kwargs.items():
                if isinstance(value, Tcl_Obj):
                    #print(name, 'FOUND kwarg:', key, value)
                    kwargs[key] = str(value)
            return func(*args, **kwargs)
        return f
    class TkReflector(object):
        # Proxy around the low-level tk interpreter: forwards attribute
        # access, wrapping only the 'splitlist' method with the converter.
        def __init__(self, tk):
            self.tk = tk
        def __getattribute__(self, name):
            a = getattr(object.__getattribute__(self, 'tk'), name)
            if name in ['splitlist']:
                #if hasattr(a, '__call__'):
                return wrapper(a, name)
            else:
                return a
    class TkFix(tkinter.Tk):
        # Drop-in Tk replacement: installs the reflector only when the
        # running Tcl patchlevel reports 8.6.
        def __init__(self, *args, **kwargs):
            tkinter._Tk.__init__(self, *args, **kwargs)
            self.__tk = self.tk
            version = self.tk.call('info', 'patchlevel')
            if version.startswith('8.6'):
                self.tk = TkReflector(self.__tk)
    tkinter.Tk = TkFix
def _hotpatch():
# Fix numerous outstanding IDLE issues...
import idlelib.EditorWindow
EditorWindowOrig = idlelib.EditorWindow.EditorWindow
class EditorWindow(EditorWindowOrig):
_invalid_keybindings = [] # keep track of invalid keybindings encountered
_valid_keybindings = []
# Work around a bug in IDLE for handling events bound to menu items.
# The <<event-variables>> are stored globally, not locally to
# each editor window. Without this, toggling a checked menu item
# in one editor window toggles the item in ALL editor windows.
# Issue 13179
def __init__(self, flist=None, filename=None, key=None, root=None):
if flist is not None:
flist.vars = {}
EditorWindowOrig.__init__(self, flist, filename, key, root)
# FIXME: Do not transfer custom keybindings if IDLE keyset is set to default
# Fix broken keybindings that has plagued IDLE for years.
# Issue 12387, 4765, 13071, 6739, 5707, 11437
def apply_bindings(self, keydefs=None): # SUBCLASS to catch errors
#return EditorWindowOrig.apply_bindings(self, keydefs)
if keydefs is None:
keydefs = self.Bindings.default_keydefs
text = self.text
text.keydefs = keydefs
invalid = []
for event, keylist in keydefs.items():
for key in keylist:
try:
text.event_add(event, key)
except TclError as err:
#print(' Apply bindings error:', event, key)
invalid.append((event, key))
if invalid: # notify errors
self._keybinding_error(invalid)
def RemoveKeybindings(self): # SUBCLASS to catch errors
"Remove the keybindings before they are changed."
EditorWindow._invalid_keybindings = []
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
for key in keylist:
try:
self.text.event_delete(event, key)
except Exception as err:
print(' Caught event_delete error:', err)
print(' For %s, %s' % (event, key))
pass
|
google/google-ctf | third_party/edk2/ArmPlatformPkg/Scripts/Ds5/profile.py | Python | apache-2.0 | 11,068 | 0.02331 | #!/usr/bin/python
#
# Copyright (c) 2014, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import getopt
import operator
import os
import pickle
import sys
from sys import argv
from cStringIO import StringIO
modules = {}
functions = {}
functions_addr = {}
def usage():
    """Print command-line usage for the profiling script."""
    # Single-argument parenthesised print behaves identically on Python 2
    # (parenthesised expression) and also parses on Python 3, unlike the
    # original Python-2-only print statements.
    print("-t,--trace: Location of the Trace file")
    print("-s,--symbols: Location of the symbols and modules")
def get_address_from_string(address):
    """Convert an address string such as 'S:0x8000' or 'EL2:0x1000' to an int.

    Removes a leading state/exception-level prefix ('S:', 'N:', 'EL2:',
    'EL1:') before parsing the remainder as hexadecimal.

    The original chained str.strip() calls treated each argument as a *set
    of characters* and therefore also removed legitimate leading/trailing
    hex digits ('E', 'L', '1', '2'): e.g. 'EL2:0x8000FACE' lost its final
    'E' and parsed to the wrong value.
    """
    for prefix in ('S:', 'N:', 'EL2:', 'EL1:'):
        if address.startswith(prefix):
            address = address[len(prefix):]
    return int(address, 16)
def get_module_from_addr(modules, addr):
    """Return the name of the module whose [start, end] range (inclusive on
    both ends) contains *addr*, or None when no module matches."""
    return next((name for name, extent in modules.items()
                 if extent['start'] <= addr <= extent['end']), None)
def add_cycles_to_function(functions, func_name, addr, cycles):
    """Attribute *cycles* to the function containing *addr*.

    Returns the (function, module) pair the cycles were accounted to, or
    None when the address could not be resolved.  The previously matched
    entry is cached on the function object itself so consecutive samples
    inside the same function take a fast path.  Falls back to the global
    ``functions_addr`` page index (4 KiB granularity) for '<Unknown>'
    names, and to a synthetic 'Unknown' bucket as a last resort.

    Fixes applied to the original: stray corruption characters that broke
    the syntax were removed, the ``prev_module_name`` static attribute is
    now initialised alongside the other two, and the Python-2-only print
    statement was parenthesised.
    """
    if func_name != "<Unknown>":
        # Check if we are still in the previous function
        if add_cycles_to_function.prev_func_name == func_name:
            add_cycles_to_function.prev_entry['cycles'] += cycles
            return (add_cycles_to_function.prev_func_name, add_cycles_to_function.prev_module_name)

        if func_name in functions.keys():
            for module_name, module_value in functions[func_name].iteritems():
                if (module_value['start'] <= addr) and (addr < module_value['end']):
                    module_value['cycles'] += cycles
                    add_cycles_to_function.prev_func_name = func_name
                    add_cycles_to_function.prev_module_name = module_name
                    add_cycles_to_function.prev_entry = module_value
                    return (func_name, module_name)
                elif (module_value['end'] == 0):
                    # End address unknown ('static' function workaround below).
                    module_value['cycles'] += cycles
                    add_cycles_to_function.prev_func_name = func_name
                    add_cycles_to_function.prev_module_name = module_name
                    add_cycles_to_function.prev_entry = module_value
                    return (func_name, module_name)

        # Workaround to fix the 'info func' limitation that does not expose
        # the 'static' functions: create an open-ended entry on the fly.
        module_name = get_module_from_addr(modules, addr)
        functions[func_name] = {}
        functions[func_name][module_name] = {}
        functions[func_name][module_name]['start'] = 0
        functions[func_name][module_name]['end'] = 0
        functions[func_name][module_name]['cycles'] = cycles
        functions[func_name][module_name]['count'] = 0
        add_cycles_to_function.prev_func_name = func_name
        add_cycles_to_function.prev_module_name = module_name
        add_cycles_to_function.prev_entry = functions[func_name][module_name]
        return (func_name, module_name)
    else:
        # Check if we are still in the previous function
        if (add_cycles_to_function.prev_entry is not None) and (add_cycles_to_function.prev_entry['start'] <= addr) and (addr < add_cycles_to_function.prev_entry['end']):
            add_cycles_to_function.prev_entry['cycles'] += cycles
            return (add_cycles_to_function.prev_func_name, add_cycles_to_function.prev_module_name)

        # Generate the lookup key for the given address (4 KiB page base).
        key = addr & ~0x0FFF
        if key not in functions_addr.keys():
            # No function known for this page: account to 'Unknown'.
            if 'Unknown' not in functions.keys():
                functions['Unknown'] = {}
            if 'Unknown' not in functions['Unknown'].keys():
                functions['Unknown']['Unknown'] = {}
                functions['Unknown']['Unknown']['cycles'] = 0
                functions['Unknown']['Unknown']['count'] = 0
            functions['Unknown']['Unknown']['cycles'] += cycles
            add_cycles_to_function.prev_func_name = None
            return None
        for func_key, module in functions_addr[key].iteritems():
            for module_key, module_value in module.iteritems():
                if (module_value['start'] <= addr) and (addr < module_value['end']):
                    module_value['cycles'] += cycles
                    # In case of <Unknown> we prefer to fallback on the direct search
                    add_cycles_to_function.prev_func_name = func_key
                    add_cycles_to_function.prev_module_name = module_key
                    add_cycles_to_function.prev_entry = module_value
                    return (func_key, module_key)

    print("Warning: Function %s @ 0x%x not found" % (func_name, addr))
    add_cycles_to_function.prev_func_name = None
    return None

# Static variables caching the previously matched function between calls.
add_cycles_to_function.prev_func_name = None
add_cycles_to_function.prev_module_name = None
add_cycles_to_function.prev_entry = None
def trace_read():
    """Return the next line of the global trace file, tracking progress.

    Adds the number of bytes consumed to the global ``trace_process``
    counter (presumably used for progress reporting by the top-level
    script, which also opens the ``trace`` file object).
    """
    global trace_process
    line = trace.readline()
    trace_process += len(line)
    return line
#
# Parse arguments
#
trace_name = None
symbols_file = None
opts,args = getopt.getopt(sys.argv[1:], "ht:vs:v", ["help","trace=","symbols="])
if (opts is None) or (not opts):
usage()
sys.exit()
for o,a in opts:
if o in ("-h","--help"):
usage()
sys.exit()
elif o in ("-t","--trace"):
trace_name = a
elif o in ("-s","--symbols"):
symbols_file = a
else:
assert False, "Unhandled option (%s)" % o
#
# We try first to see if we run the script from DS-5
#
try:
from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
# Debugger object for accessing the debugger
debugger = Debugger()
# Initialisation commands
ec = debugger.getExecutionContext(0)
ec.getExecutionService().stop()
ec.getExecutionService().waitForStop()
# in case the execution context reference is out of date
ec = debugger.getExecutionContext(0)
#
# Get the module name and their memory range
#
info_file = ec.executeDSCommand("info file")
info_file_str = StringIO(info_file)
line = info_file_str.readline().strip('\n')
while line != '':
if ("Symbols from" in line):
# Get the module name from the line 'Symbols from "/home/...."'
module_name = line.split("\"")[1].split("/")[-1]
modules[module_name] = {}
# Look for the text section
line = info_file_str.readline().strip('\n')
while (line != '') and ("Symbols from" not in line):
if ("ER_RO" in line):
modules[module_name]['start'] = get_address_from_string(line.split()[0])
modules[module_name]['end'] = get_address_from_string(line.split()[2])
line = info_file_str.readline().strip('\n')
break;
if (".text" in line):
modules[module_name]['start'] = get_address_from_string(line.split()[0])
modules[module_name]['end'] = get_address_from_string(line.split()[2])
line = info_file_str.readline().strip('\n')
break;
line = info_file_str.readline().strip('\n')
line = info_file_str.readline().strip('\n')
#
# Get the function name and their memory range
#
info_func = ec.executeDSCommand("info func")
info_func_str = StringIO(info_func)
# Skip the first line 'Low-level symbols ...'
line = info_func_str.readline().strip('\n')
func_prev = None
while line != '':
# We ignore all the functions after 'Functions in'
if ("Functions in " in line):
line = info_func_str.readline().strip('\n')
while line != '':
line = info_func_str.readline().strip('\n')
line = info_func_str.readline().strip('\n')
continue
if ("Low-level symbols" in line):
# We need to fixup the last function of the module
if func_prev is not None:
func_prev['end'] = modules[module_name]['end']
func_prev = None
line = info_func_str.readline().strip('\n')
continue
func_name = line.split()[1]
func_start = get_address_from_string(line.split()[0])
module_name = get_module_from_addr(modules, func_start)
if func_name not in functions.keys():
functions[func_name] = {}
functions[func_name][module_name] = {}
functions[func_name][module_name]['start'] = func_start
functions[func_name][module_name]['cycles'] = 0
functions[func_name][module_name]['count'] = 0
# Set the end address of the previous function
if func_prev is not None:
func_prev['end'] = func_start
func_prev = functions[func_name][module_name]
line = info_func_str.readline().strip('\n')
# Fixup the last function
func_prev['end'] = modules[module_name]['end']
if symbols_file is not None:
pickle.dump((modules, functions), open(symbols_file, "w"))
except:
if symbols_file is None:
print "Error: Symbols file is required when run ou |
bahattincinic/arguman.org | web/premises/migrations/0031_premise_weight.py | Python | mit | 431 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds an integer ``weight`` column (default 0) to the Premise model.

    dependencies = [
        ('premises', '0030_report_reason'),
    ]

    operations = [
        migrations.AddField(
            model_name='premise',
            name='weight',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
    ]
|
Lilykos/invenio | invenio/ext/sqlalchemy/__init__.py | Python | gpl-2.0 | 7,906 | 0.000379 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initialization and configuration for `flask_sqlalchemy`."""
from flask_registry import ModuleAutoDiscoveryRegistry, RegistryProxy
from flask_sqlalchemy import SQLAlchemy as FlaskSQLAlchemy
import sqlalchemy
import sqlalchemy.dialects.postgresql
from sqlalchemy import event, types
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.pool import Pool
from sqlalchemy_utils import JSONType
from invenio.ext.sqlalchemy.types import LegacyBigInteger, LegacyInteger, \
LegacyMediumInteger, LegacySmallInteger, LegacyTinyInteger
from .expressions import AsBINARY
from .types import GUID, MarshalBinary, PickleBinary
from .utils import get_model_type
def _include_sqlalchemy(obj, engine=None):
    """Attach all required SQLAlchemy column types to *obj*.

    :param obj: target object (normally the ``db`` instance) that receives
        the type attributes (``JSON``, ``Char``, ``TinyText``, ...).
    :param engine: database engine name; ``'mysql'`` selects the
        MySQL-specific dialect types.
    """
    # for module in sqlalchemy, sqlalchemy.orm:
    #    for key in module.__all__:
    #        if not hasattr(obj, key):
    #            setattr(obj, key,
    #                    getattr(module, key))
    if engine == 'mysql':
        from sqlalchemy.dialects import mysql as engine_types
    else:
        from sqlalchemy import types as engine_types

    # Length is provided to JSONType to ensure MySQL uses LONGTEXT instead
    # of TEXT which only provides for 64kb storage compared to 4gb for
    # LONGTEXT.
    setattr(obj, 'JSON', JSONType(length=2 ** 32 - 2))
    setattr(obj, 'Char', engine_types.CHAR)
    try:
        setattr(obj, 'TinyText', engine_types.TINYTEXT)
    except AttributeError:
        # Dialect has no TINYTEXT; fall back to generic TEXT.
        setattr(obj, 'TinyText', engine_types.TEXT)
    setattr(obj, 'hybrid_property', hybrid_property)
    try:
        setattr(obj, 'Double', engine_types.DOUBLE)
    except AttributeError:
        # Dialect has no DOUBLE; fall back to FLOAT.
        setattr(obj, 'Double', engine_types.FLOAT)
    setattr(obj, 'Binary', sqlalchemy.types.LargeBinary)
    setattr(obj, 'iBinary', sqlalchemy.types.LargeBinary)
    setattr(obj, 'iLargeBinary', sqlalchemy.types.LargeBinary)
    setattr(obj, 'iMediumBinary', sqlalchemy.types.LargeBinary)
    setattr(obj, 'UUID', GUID)
    setattr(obj, 'Integer', LegacyInteger)
    setattr(obj, 'MediumInteger', LegacyMediumInteger)
    setattr(obj, 'SmallInteger', LegacySmallInteger)
    setattr(obj, 'TinyInteger', LegacyTinyInteger)
    setattr(obj, 'BigInteger', LegacyBigInteger)

    if engine == 'mysql':
        from .engines import mysql as dummy_mysql  # noqa
    #    module = invenio.sqlalchemyutils_mysql
    #    for key in module.__dict__:
    #        setattr(obj, key,
    #                getattr(module, key))

    obj.AsBINARY = AsBINARY
    obj.MarshalBinary = MarshalBinary
    obj.PickleBinary = PickleBinary

    # Overwrite :meth:`MutableDict.update` to detect changes.
    from sqlalchemy.ext.mutable import MutableDict

    def update_mutable_dict(self, *args, **kwargs):
        # Notify SQLAlchemy's change-tracking after a bulk update.
        super(MutableDict, self).update(*args, **kwargs)
        self.changed()

    MutableDict.update = update_mutable_dict
    obj.MutableDict = MutableDict
# @compiles(types.Text, 'postgresql')
# @compiles(sqlalchemy.dialects.postgresql.TEXT, 'postgresql')
# def compile_text(element, compiler, **kw):
# """Redefine Text filed type for PostgreSQL."""
# return 'TEXT'
# @compiles(types.VARBINARY, 'postgresql')
# def compile_text(element, compiler, **kw):
# """Redefine VARBINARY filed type for PostgreSQL."""
# return 'BYTEA'
def autocommit_on_checkin(dbapi_con, con_record):
    """Call autocommit on raw mysql connection for fixing bug in MySQL 5.5."""
    try:
        dbapi_con.autocommit(True)
    except Exception:
        # Deliberate best-effort: drivers without ``autocommit`` are ignored.
        # (Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.)
        pass
        # FIXME
        # from invenio.ext.logging import register_exception
        # register_exception()
class SQLAlchemy(FlaskSQLAlchemy):
    """Database object."""

    def init_app(self, app):
        """Init application."""
        # Bug fix: ``super(self.__class__, self)`` recurses infinitely as
        # soon as this class is subclassed; name the class explicitly.
        super(SQLAlchemy, self).init_app(app)
        engine = app.config.get('CFG_DATABASE_TYPE', 'mysql')
        self.Model = get_model_type(self.Model)
        if engine == 'mysql':
            # Override MySQL parameters to force MyISAM engine
            mysql_parameters = {'keep_existing': True,
                                'extend_existing': False,
                                'mysql_engine': 'MyISAM',
                                'mysql_charset': 'utf8'}

            original_table = self.Table

            def table_with_myisam(*args, **kwargs):
                """Use same MySQL parameters that are used for ORM models."""
                new_kwargs = dict(mysql_parameters)
                new_kwargs.update(kwargs)
                return original_table(*args, **new_kwargs)

            self.Table = table_with_myisam
            self.Model.__table_args__ = mysql_parameters

        _include_sqlalchemy(self, engine=engine)

    def __getattr__(self, name):
        """Raise an informative error when attribute lookup fails.

        This is only called when the normal mechanism fails,
        so in practice should never be called.
        It is only provided to satisfy pylint that it is okay not to
        raise E1101 errors in the client code.

        :see http://stackoverflow.com/a/3515234/780928
        """
        raise AttributeError("%r instance has no attribute %r" % (self, name))

    def schemadiff(self, excludeTables=None):
        """Generate a schema diff."""
        from migrate.versioning import schemadiff
        return schemadiff \
            .getDiffOfModelAgainstDatabase(self.metadata,
                                           self.engine,
                                           excludeTables=excludeTables)

    def apply_driver_hacks(self, app, info, options):
        """Called before engine creation."""
        # Don't forget to apply hacks defined on parent object.
        # (Same super() fix as in init_app.)
        super(SQLAlchemy, self).apply_driver_hacks(app, info, options)
        if info.drivername == 'mysql':
            options.setdefault('execution_options', {
                # Autocommit cause Exception in SQLAlchemy >= 0.9.
                # @see http://docs.sqlalchemy.org/en/rel_0_9/
                #   core/connections.html#understanding-autocommit
                # 'autocommit': True,
                'use_unicode': False,
                'charset': 'utf8mb4',
            })
            event.listen(Pool, 'checkin', autocommit_on_checkin)
db = SQLAlchemy()
"""
Provides access to :class:`~.SQLAlchemy` instance.
"""
models = RegistryProxy('models', ModuleAutoDiscoveryRegistry, 'models')
def setup_app(app):
    """Setup SQLAlchemy extension."""
    cfg = app.config
    if 'SQLALCHEMY_DATABASE_URI' not in cfg:
        from sqlalchemy.engine.url import URL
        # Assemble the connection URL from the legacy CFG_DATABASE_* keys.
        url_kwargs = {
            'username': cfg.get('CFG_DATABASE_USER'),
            'password': cfg.get('CFG_DATABASE_PASS'),
            'host': cfg.get('CFG_DATABASE_HOST'),
            'database': cfg.get('CFG_DATABASE_NAME'),
            'port': cfg.get('CFG_DATABASE_PORT'),
        }
        cfg['SQLALCHEMY_DATABASE_URI'] = URL(
            cfg.get('CFG_DATABASE_TYPE', 'mysql'), **url_kwargs)

    # Let's initialize database.
    db.init_app(app)
    return app
|
anthimeschrefheere/openClassM | openClassM/forum/urls.py | Python | gpl-2.0 | 274 | 0.043796 | # from django.conf.urls import patterns, url
# urlpatterns = patterns('',
#     url(r'^forum/$', 'forum.views.forum_dir'),
# url(r'^forum/(?P<forum_id>\d+)/$', 'forum.views.thread_dir'),
#     url(r'^thread/(?P<thread_id>\d+)/$', 'forum.views.post_dir'),
# )
|
seeARMS/Computer-Network-Queue-Simulation | rando.py | Python | mit | 774 | 0.00646 | import numpy as np
'''
pps = packets per second
rand = the randomly generated number
'''
def exponential(pps, rand):
    """Inverse-CDF transform: map a uniform sample to an exponential variate.

    :param pps: rate parameter (packets per second)
    :param rand: uniform random sample in [0, 1)
    :return: exponentially distributed inter-arrival time
    """
    rate = pps
    sample = (-1 / rate) * np.log(1 - rand)
    return sample
def generate_random():
    """Return one uniform random float from the half-open interval [0, 1).

    Bug fix: the original returned the ``np.random.uniform`` function
    object itself -- the call parentheses were missing.
    """
    # NOTE(review): the original comment wondered whether 1 should be
    # included; np.random.uniform() samples from [0, 1).
    return np.random.uniform()
'''
ticks * tick_duration is the time duration
for which we want to simulate the system
'''
def tick(ticks):
    """Simulation loop skeleton; the body is still a to-do placeholder.

    ``ticks * tick_duration`` is the time duration for which the system
    is simulated (see module comment above).
    """
    # NOTE(review): iterating ``ticks`` directly assumes the caller passes
    # an iterable (e.g. range(n)); a plain int tick count would raise
    # TypeError -- confirm the intended call convention.
    for i in ticks:
        '''todo: call the data packet generator to try to generate
        a new data packet
        -or-
        call the server to let it know another tick has elapsed,
        and the server will decide if servicing the current packet
        will end in this tick, thereby pushing the packet out of queue
        '''
|
jirikuncar/invenio-utils | invenio_utils/datacite.py | Python | gpl-2.0 | 4,248 | 0.000471 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Utilities for working with DataCite metadata."""
from __future__ import absolute_import
import re
import urllib2
from invenio_utils.xmlDict import ElementTree, XmlDictConfig
__all__ = (
'DataciteMetadata',
)
class DataciteMetadata(object):
    """Helper class for working with DataCite metadata.

    Fetches the DataCite XML record for a DOI and exposes convenience
    accessors.  ``self.error`` is True when the record could not be
    fetched.  (Python 2 module: uses ``urllib2``.)
    """

    def __init__(self, doi):
        """Initialize object."""
        self.url = "http://data.datacite.org/application/x-datacite+xml/"
        self.error = False
        try:
            data = urllib2.urlopen(self.url + doi).read()
        except urllib2.HTTPError:
            self.error = True

        if not self.error:
            # Clean the xml for parsing
            data = re.sub('<\?xml.*\?>', '', data, count=1)
            # Remove the resource tags
            data = re.sub('<resource .*xsd">', '', data)
            # Strip the trailing '</resource>' (11 chars) and re-wrap in a
            # <datacite> root element.
            self.data = '<?xml version="1.0"?><datacite>' + \
                data[0:len(data) - 11] + '</datacite>'
            self.root = ElementTree.XML(self.data)
            self.xml = XmlDictConfig(self.root)

    def get_creators(self, attribute='creatorName'):
        """Get DataCite creators (single value or list, matching the XML)."""
        if 'creators' in self.xml:
            if isinstance(self.xml['creators']['creator'], list):
                return [c[attribute] for c in self.xml['creators']['creator']]
            else:
                return self.xml['creators']['creator'][attribute]
        return None

    def get_titles(self):
        """Get DataCite titles."""
        if 'titles' in self.xml:
            return self.xml['titles']['title']
        return None

    def get_publisher(self):
        """Get DataCite publisher."""
        if 'publisher' in self.xml:
            return self.xml['publisher']
        return None

    def get_dates(self):
        """Get DataCite dates."""
        if 'dates' in self.xml:
            if isinstance(self.xml['dates']['date'], dict):
                # Python 2: dict.values() is a list, so [0] is valid here.
                return self.xml['dates']['date'].values()[0]
            return self.xml['dates']['date']
        return None

    def get_publication_year(self):
        """Get DataCite publication year."""
        if 'publicationYear' in self.xml:
            return self.xml['publicationYear']
        return None

    def get_language(self):
        """Get DataCite language."""
        if 'language' in self.xml:
            return self.xml['language']
        return None

    def get_related_identifiers(self):
        """Get DataCite related identifiers (not implemented)."""
        pass

    def get_description(self, description_type='Abstract'):
        """Get DataCite description of the given type (default Abstract)."""
        if 'descriptions' in self.xml:
            if isinstance(self.xml['descriptions']['description'], list):
                for description in self.xml['descriptions']['description']:
                    if description_type in description:
                        return description[description_type]
            elif isinstance(self.xml['descriptions']['description'], dict):
                description = self.xml['descriptions']['description']
                if description_type in description:
                    return description[description_type]
                elif len(description) == 1:
                    # return the only description
                    return description.values()[0]
        return None

    def get_rights(self):
        """Get DataCite rights."""
        # Bug fix: the original tested ``'titles' in self.xml`` here
        # (copy-paste from get_titles), which could raise KeyError.
        if 'rights' in self.xml:
            return self.xml['rights']
        return None
|
JohnPapps/django-oracle-drcp | django-oracle-drcp/compiler.py | Python | bsd-2-clause | 73 | 0 | # pylint: di | sable=W0401
from django.db.backends.oracle.compiler im | port *
|
thomec/tango | lists/models.py | Python | gpl-2.0 | 850 | 0 | # lists/models.py
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
class List(models.Model):
    """A to-do list; ``owner`` is optional so anonymous lists are allowed."""
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)

    def get_absolute_url(self):
        # Detail view is resolved by primary key.
        return reverse('view_list', args=[self.id])

    @staticmethod
    def create_new(first_item_text, owner=None):
        # Convenience factory: a list is always created together with its
        # first item.
        list_ = List.objects.create(owner=owner)
        Item.objects.create(text=first_item_text, list=list_)
        return list_

    @property
    def name(self):
        # A list is displayed by the text of its first item.
        return self.item_set.first().text
class Item(models.Model):
    """A single entry belonging to a :class:`List`."""
    list = models.ForeignKey(List, default=None)
    text = models.TextField(default='')

    class Meta:
        ordering = ('id',)
        # The same text may not appear twice within one list.
        unique_together = ('list', 'text')

    def __str__(self):
        return self.text
|
riccardodg/lodstuff | lremap/it.cnr.ilc.lremapowl/src/lremapobj/paper.py | Python | gpl-3.0 | 3,059 | 0.014711 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on May 12, 2014
Model Paper
fields:
conf
year
passcode
paper id
status
title
category1
category1
keywords
@author: riccardo
'''
class Paper(object):
    """Model of a conference paper record.

    Holds conference, year, passcode, paper id, status, title, two
    category labels and keywords.  Each field is reachable both through
    explicit ``get_``/``set_``/``del_`` accessors and through a property
    of the same name.
    """

    # Class-level defaults; instance assignment shadows these.
    __conf = ""
    __year = ""
    __passcode = ""
    __pid = ""
    __status = ""
    __title = ""
    __category1 = ""
    __category2 = ""
    __keywords = ""

    def __init__(self, conf, year, pid, status, title, category1, category2,
                 keywords):
        """Initialise every field except ``passcode`` (stays at '')."""
        self.__conf = conf
        self.__year = year
        self.__pid = pid
        self.__status = status
        self.__title = title
        self.__category1 = category1
        self.__category2 = category2
        self.__keywords = keywords

    # --- conf ---------------------------------------------------------
    def get_conf(self):
        return self.__conf

    def set_conf(self, value):
        self.__conf = value

    def del_conf(self):
        del self.__conf

    conf = property(get_conf, set_conf, del_conf, "conf's docstring")

    # --- year ---------------------------------------------------------
    def get_year(self):
        return self.__year

    def set_year(self, value):
        self.__year = value

    def del_year(self):
        del self.__year

    year = property(get_year, set_year, del_year, "year's docstring")

    # --- passcode -----------------------------------------------------
    def get_passcode(self):
        return self.__passcode

    def set_passcode(self, value):
        self.__passcode = value

    def del_passcode(self):
        del self.__passcode

    passcode = property(get_passcode, set_passcode, del_passcode,
                        "passcode's docstring")

    # --- pid ----------------------------------------------------------
    def get_pid(self):
        return self.__pid

    def set_pid(self, value):
        self.__pid = value

    def del_pid(self):
        del self.__pid

    pid = property(get_pid, set_pid, del_pid, "pid's docstring")

    # --- status -------------------------------------------------------
    def get_status(self):
        return self.__status

    def set_status(self, value):
        self.__status = value

    def del_status(self):
        del self.__status

    status = property(get_status, set_status, del_status,
                      "status's docstring")

    # --- title --------------------------------------------------------
    def get_title(self):
        return self.__title

    def set_title(self, value):
        self.__title = value

    def del_title(self):
        del self.__title

    title = property(get_title, set_title, del_title, "title's docstring")

    # --- category1 ----------------------------------------------------
    def get_category_1(self):
        return self.__category1

    def set_category_1(self, value):
        self.__category1 = value

    def del_category_1(self):
        del self.__category1

    category1 = property(get_category_1, set_category_1, del_category_1,
                         "category1's docstring")

    # --- category2 ----------------------------------------------------
    def get_category_2(self):
        return self.__category2

    def set_category_2(self, value):
        self.__category2 = value

    def del_category_2(self):
        del self.__category2

    category2 = property(get_category_2, set_category_2, del_category_2,
                         "category2's docstring")

    # --- keywords -----------------------------------------------------
    def get_keywords(self):
        return self.__keywords

    def set_keywords(self, value):
        self.__keywords = value

    def del_keywords(self):
        del self.__keywords

    keywords = property(get_keywords, set_keywords, del_keywords,
                        "keywords's docstring")
|
beeftornado/sentry | src/sentry/runner/commands/createuser.py | Python | bsd-3-clause | 3,480 | 0.002011 | from __future__ import absolute_import, print_function
import click
import sys
from sentry.runner.decorators import configuration
def _get_field(field_name):
    # Look up a model field on the User model so its validators can be
    # reused for cleaning CLI input.
    from sentry.models import User

    return User._meta.get_field(field_name)
def _get_email():
    """Prompt for an email and validate it with the User.email field."""
    from django.core.exceptions import ValidationError

    rv = click.prompt("Email")
    field = _get_field("email")
    try:
        return field.clean(rv, None)
    except ValidationError as e:
        # Surface Django validation messages as a CLI error.
        raise click.ClickException("; ".join(e.messages))
def _get_password():
    """Prompt (hidden, with confirmation) and validate the password."""
    from django.core.exceptions import ValidationError

    rv = click.prompt("Password", hide_input=True, confirmation_prompt=True)
    field = _get_field("password")
    try:
        return field.clean(rv, None)
    except ValidationError as e:
        # Surface Django validation messages as a CLI error.
        raise click.ClickException("; ".join(e.messages))
def _get_superuser():
    # Interactive yes/no prompt; defaults to a regular (non-super) user.
    return click.confirm("Should this user be a superuser?", default=False)
@click.command()
@click.option("--email")
@click.option("--password")
@click.option("--superuser/--no-superuser", default=None, is_flag=True)
@click.option("--no-password", default=False, is_flag=True)
@click.option("--no-input", default=False, is_flag=True)
@click.option("--force-update", default=False, is_flag=True)
@configuration
def createuser(email, password, superuser, no_password, no_input, force_update):
    "Create a new user."
    if not no_input:
        # Prompt interactively for anything not supplied on the command line.
        if not email:
            email = _get_email()

        if not (password or no_password):
            password = _get_password()

        if superuser is None:
            superuser = _get_superuser()

    if superuser is None:
        superuser = False

    if not email:
        raise click.ClickException("Invalid or missing email address.")

    # TODO(mattrobenolt): Accept password over stdin?
    if not no_password and not password:
        raise click.ClickException("No password set and --no-password not passed.")

    from sentry import roles
    from sentry.models import User
    from django.conf import settings

    user = User(
        email=email, username=email, is_superuser=superuser, is_staff=superuser, is_active=True
    )

    if password:
        user.set_password(password)

    if User.objects.filter(username=email).exists():
        if force_update:
            user.save(force_update=force_update)
            click.echo("User updated: %s" % (email,))
        else:
            click.echo("User: %s exists, use --force-update to force" % (email,))
            sys.exit(3)
    else:
        user.save()
        click.echo("User created: %s" % (email,))

    # TODO(dcramer): kill this when we improve flows
    if settings.SENTRY_SINGLE_ORGANIZATION:
        from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team

        org = Organization.get_default()
        if superuser:
            role = roles.get_top_dog().id
        else:
            role = org.default_role
        member = OrganizationMember.objects.create(organization=org, user=user, role=role)

        # if we've only got a single team let's go ahead and give
        # access to that team as its likely the desired outcome
        teams = list(Team.objects.filter(organization=org)[0:2])
        if len(teams) == 1:
            OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member)
            click.echo("Added to organization: %s" % (org.slug,))
|
robwarm/gpaw-symm | gpaw/test/pw/davidson_pw.py | Python | gpl-3.0 | 1,034 | 0.000967 | from ase import Atom, Atoms
from ase import Atom, Atoms
from gpaw import GPAW
from gpaw.test import equal

# fcc Al in a tetragonal 2-atom cell; lattice constant a = 4.05 A.
a = 4.05
d = a / 2**0.5
bulk = Atoms([Atom('Al', (0, 0, 0)),
              Atom('Al', (0.5, 0.5, 0.5))], pbc=True)
bulk.set_cell((d, d, a), scale_atoms=True)
h = 0.25

# Reference run with the default eigensolver.
calc = GPAW(mode='pw',
            nbands=2 * 8,
            kpts=(2, 2, 2),
            convergence={'eigenstates': 7.2e-9, 'energy': 1e-5})
bulk.set_calculator(calc)
e0 = bulk.get_potential_energy()
niter0 = calc.get_number_of_iterations()

# Same system with the Davidson eigensolver; energies must agree.
calc = GPAW(mode='pw',
            nbands=2 * 8,
            kpts=(2, 2, 2),
            convergence={'eigenstates': 7.2e-9,
                         'energy': 1e-5,
                         'bands': 5},
            eigensolver='dav')
bulk.set_calculator(calc)
e1 = bulk.get_potential_energy()
niter1 = calc.get_number_of_iterations()
equal(e0, e1, 5.0e-6)

energy_tolerance = 0.00004
niter_tolerance = 0
equal(e0, -6.97798, energy_tolerance)
assert 10 <= niter0 <= 14, niter0
equal(e1, -6.97798, energy_tolerance)
assert 10 <= niter1 <= 24, niter1
|
cernanalysispreservation/analysis-preservation.cern.ch | cap/modules/deposit/minters.py | Python | gpl-2.0 | 1,769 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2018 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""PID minters for drafts."""
from __future__ import absolute_import, print_function
import uuid
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
def cap_deposit_minter(record_uuid, data):
    """Mint deposit's identifier.

    Reuses ``data['_deposit']['id']`` when present, otherwise generates a
    fresh hex UUID; registers it as a ``depid`` PID pointing at
    *record_uuid* and rewrites ``data['_deposit']``.
    """
    deposit_info = data.get('_deposit', {})
    if 'id' in deposit_info:
        pid_value = deposit_info['id']
    else:
        pid_value = uuid.uuid4().hex

    pid = PersistentIdentifier.create(
        'depid',
        pid_value,
        object_type='rec',
        object_uuid=record_uuid,
        status=PIDStatus.REGISTERED
    )

    data['_deposit'] = {
        'id': pid.pid_value,
        'status': 'draft',
    }
    return pid
|
leppa/home-assistant | homeassistant/components/hue/sensor.py | Python | apache-2.0 | 3,075 | 0.000325 | """Hue sensor entities."""
from aiohue.sensors import TYPE_ZLL_LIGHTLEVEL, TYPE_ZLL_TEMPERATURE
from homeassistant.components.hue.sensor_base import (
GenericZLLSensor,
SensorManager,
async_setup_entry as shared_async_setup_entry,
)
from homeassistant.const import (
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import Entity
LIGHT_LEVEL_NAME_FORMAT = "{} light level"
TEMPERATURE_NAME_FORMAT = "{} temperature"
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Defer sensor setup to the shared sensor module."""
    # Register the two gauge sensor types with the shared SensorManager
    # before delegating; the shared module performs the actual setup.
    SensorManager.sensor_config_map.update(
        {
            TYPE_ZLL_LIGHTLEVEL: {
                "binary": False,
                "name_format": LIGHT_LEVEL_NAME_FORMAT,
                "class": HueLightLevel,
            },
            TYPE_ZLL_TEMPERATURE: {
                "binary": False,
                "name_format": TEMPERATURE_NAME_FORMAT,
                "class": HueTemperature,
            },
        }
    )
    await shared_async_setup_entry(hass, config_entry, async_add_entities, binary=False)
class GenericHueGaugeSensorEntity(GenericZLLSensor, Entity):
    """Parent class for all 'gauge' Hue device sensors."""

    async def _async_update_ha_state(self, *args, **kwargs):
        # Bug fix: ``async_update_ha_state`` is a bound method, so ``self``
        # must not be passed explicitly (doing so raised a TypeError).
        await self.async_update_ha_state(*args, **kwargs)
class HueLightLevel(GenericHueGaugeSensorEntity):
    """The light level sensor entity for a Hue motion sensor device."""

    device_class = DEVICE_CLASS_ILLUMINANCE
    unit_of_measurement = "lx"

    @property
    def state(self):
        """Return the light level in lux, or None when no reading exists."""
        if self.sensor.lightlevel is None:
            return None

        # https://developers.meethue.com/develop/hue-api/supported-devices/#clip_zll_lightlevel
        # Light level in 10000 log10 (lux) +1 measured by sensor. Logarithm
        # scale used because the human eye adjusts to light levels and small
        # changes at low lux levels are more noticeable than at high lux
        # levels.
        return round(float(10 ** ((self.sensor.lightlevel - 1) / 10000)), 2)

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        attributes = super().device_state_attributes
        attributes.update(
            {
                "lightlevel": self.sensor.lightlevel,
                "daylight": self.sensor.daylight,
                "dark": self.sensor.dark,
                "threshold_dark": self.sensor.tholddark,
                "threshold_offset": self.sensor.tholdoffset,
            }
        )
        return attributes
class HueTemperature(GenericHueGaugeSensorEntity):
    """The temperature sensor entity for a Hue motion sensor device."""

    device_class = DEVICE_CLASS_TEMPERATURE
    unit_of_measurement = TEMP_CELSIUS

    @property
    def state(self):
        """Return the temperature in °C, or None when no reading exists."""
        if self.sensor.temperature is None:
            return None

        # The bridge reports hundredths of a degree Celsius.
        return self.sensor.temperature / 100
|
sibirrer/astrofunc | astrofunc/LightProfiles/hernquist.py | Python | mit | 2,501 | 0.001999 | import numpy as np
class Hernquist(object):
    """
    class for Hernquist lens light (2d projected light/mass distribution)

    (The previous docstring said "pseudo Jaffe"; this class wraps the
    Hernquist lensing profile.)
    """
    def __init__(self):
        from astrofunc.LensingProfiles.hernquist import Hernquist as Hernquist_lens
        self.lens = Hernquist_lens()

    def function(self, x, y, sigma0, Rs, center_x=0, center_y=0):
        """Projected 2d profile evaluated at (x, y).

        :param x: x-coordinate(s)
        :param y: y-coordinate(s)
        :param sigma0: projected normalization
        :param Rs: Hernquist scale radius
        :param center_x: profile center, x
        :param center_y: profile center, y
        :return: projected density at (x, y)
        """
        rho0 = self.lens.sigma2rho(sigma0, Rs)
        return self.lens.density_2d(x, y, rho0, Rs, center_x, center_y)

    def light_3d(self, r, sigma0, Rs):
        """3d light density at radius r.

        :param r: 3d radius
        :param sigma0: projected normalization
        :param Rs: Hernquist scale radius
        :return: 3d density at r
        """
        rho0 = self.lens.sigma2rho(sigma0, Rs)
        return self.lens.density(r, rho0, Rs)
class Hernquist_Ellipse(object):
    """
    class for elliptical Hernquist lens light (2d projected light/mass
    distribution)
    """
    def __init__(self):
        from astrofunc.LensingProfiles.hernquist import Hernquist as Hernquist_lens
        self.lens = Hernquist_lens()
        self.spherical = Hernquist()

    def function(self, x, y, sigma0, Rs, q, phi_G, center_x=0, center_y=0):
        """Projected profile evaluated on elliptically distorted coordinates.

        :param sigma0: projected normalization
        :param Rs: scale radius
        :param q: axis ratio
        :param phi_G: position angle
        :param center_x: profile center, x
        :param center_y: profile center, y
        :return: projected density at (x, y)
        """
        x_ , y_ = self._coord_transf(x, y, q, phi_G, center_x, center_y)
        return self.spherical.function(x_, y_, sigma0, Rs)

    def light_3d(self, r, sigma0, Rs, q=1, phi_G=0):
        """3d density at radius r.

        NOTE(review): ``q`` and ``phi_G`` are accepted but unused -- the 3d
        profile is evaluated spherically; confirm this is intended.
        """
        rho0 = self.lens.sigma2rho(sigma0, Rs)
        return self.lens.density(r, rho0, Rs)

    def _coord_transf(self, x, y, q, phi_G, center_x, center_y):
        """Rotate by phi_G and stretch the axes by sqrt(1 -/+ e), e = |1-q|.

        :param q: axis ratio
        :param phi_G: position angle
        :return: transformed (x_, y_) coordinates
        """
        x_shift = x - center_x
        y_shift = y - center_y
        cos_phi = np.cos(phi_G)
        sin_phi = np.sin(phi_G)
        e = abs(1 - q)
        x_ = (cos_phi * x_shift + sin_phi * y_shift) * np.sqrt(1 - e)
        y_ = (-sin_phi * x_shift + cos_phi * y_shift) * np.sqrt(1 + e)
        return x_, y_
return x_, y_ |
NewGlobalStrategy/NetDecisionMaking | models/0.py | Python | mit | 1,624 | 0.009236 | # - Coding UTF8 -
#
# Networked Decision Making
# Site: http://code.google.com/p/global-decision-making-system/
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King (newglobalstrategy@gmail.com
# Russ also blogs occasionally at proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
#
# This file contains settings for auth policy which are need before setup
# of rest of configuration so staying here for now
#
#########################################################################
from gluon.storage import Storage

# Container for auth-policy settings read by the rest of the configuration.
settings = Storage()

# Settings for user logon - lets just uncomment as needed for now - not clear
# if there is much scope to allow changes, and python social auth will
# hopefully be added.  I don't think dual login worked with google but lets
# setup again and see.
# Plan for now is that netdecisionmaking will use web2py and Janrain while
# globaldecisionmaking will use google - for some reason Janrain doesn't seem
# to come up with google as a login and google login does not support dual
# methods, reason for which has not been investigated.
#settings.logon_methods = 'web2py'
#settings.logon_methods = 'google'
#settings.logon_methods = 'janrain'
settings.logon_methods = 'web2pyandjanrain'
# New accounts require neither email verification nor admin approval.
settings.verification = False
settings.approval = False
|
rdo-management/neutron | neutron/db/l3_dvr_db.py | Python | apache-2.0 | 31,991 | 0.000344 | # Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants as l3_const
from neutron.common import exceptions as n_exc
from neutron.common import utils as n_utils
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.i18n import _LI
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
DEVICE_OWNER_DVR_INTERFACE = l3_const.DEVICE_OWNER_DVR_INTERFACE
DEVICE_OWNER_DVR_SNAT = l3_const.DEVICE_OWNER_ROUTER_SNAT
FLOATINGIP_AGENT_INTF_KEY = l3_const.FLOATINGIP_AGENT_INTF_KEY
DEVICE_OWNER_AGENT_GW = l3_const.DEVICE_OWNER_AGENT_GW
SNAT_ROUTER_INTF_KEY = l3_const.SNAT_ROUTER_INTF_KEY
router_distributed_opts = [
cfg.BoolOpt('router_distributed',
default=False,
help=_("System-wide flag to determine the type of router "
"that tenants can create. Only admin can override.")),
]
cfg.CONF.register_opts(router_distributed_opts)
class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
l3_attrs_db.ExtraAttributesMixin):
"""Mixin class to enable DVR support."""
router_device_owners = (
l3_db.L3_NAT_db_mixin.router_device_owners +
(DEVICE_OWNER_DVR_INTERFACE,
DEVICE_OWNER_DVR_SNAT,
DEVICE_OWNER_AGENT_GW))
extra_attributes = (
l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
'name': "distributed",
'default': cfg.CONF.router_distributed
}])
    def _create_router_db(self, context, router, tenant_id):
        """Create a router db object with dvr additions."""
        # Resolve the effective 'distributed' flag (request value or the
        # system-wide default) before persisting.
        router['distributed'] = is_distributed_router(router)
        with context.session.begin(subtransactions=True):
            router_db = super(
                L3_NAT_with_dvr_db_mixin, self)._create_router_db(
                    context, router, tenant_id)
            # Persist 'distributed' into the router's extra-attributes row.
            self._process_extra_attr_router_create(context, router_db, router)
            return router_db
def _validate_router_migration(self, context, router_db, router_res):
"""Allow centralized -> distributed state transition only."""
if (router_db.extra_attributes.distributed and
router_res.get('distributed') is False):
LOG.info(_LI("Centralizing distributed router %s "
"is not supported"), router_db['id'])
raise NotImplementedError()
elif (not router_db.extra_attributes.distributed and
router_res.get('distributed')):
# Add a check for Services FWaaS and VPNaaS
# This check below ensures that the legacy routers with
# associated VPNaaS or FWaaS services are not allowed to
# migrate.
if (self.check_router_has_no_vpnaas(context, router_db) and
self.check_router_has_no_firewall(context, router_db)):
LOG.info(_LI("No Service associated, so safe to migrate: %s "
| "listed"), router_db['id'])
def check_router_has_no_firewall(self, context, router_db):
"""Check if FWaaS is associated with the legacy router."""
fwaas_service = manager.NeutronManager.get_service_plugins().get(
constants.FIREWALL)
if fwaas_service:
tenant_firewalls = fwaas | _service.get_firewalls(
context,
filters={'tenant_id': [router_db['tenant_id']]})
if tenant_firewalls:
raise l3.RouterInUse(router_id=router_db['id'])
return True
    def check_router_has_no_vpnaas(self, context, router_db):
        """Check if VPNaaS is associated with the legacy router.

        The VPN plugin raises (RouterInUse) when the router is in use;
        returns True otherwise.
        """
        vpn_plugin = manager.NeutronManager.get_service_plugins().get(
            constants.VPN)
        if vpn_plugin:
            vpn_plugin.check_router_in_use(context, router_db['id'])
        return True
    def _update_distributed_attr(
            self, context, router_id, router_db, data, gw_info):
        """Update the model to support the dvr case of a router."""
        if data.get('distributed'):
            # Re-label existing router interface ports as DVR interfaces so
            # agents treat them accordingly after migration.
            old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF
            new_owner = DEVICE_OWNER_DVR_INTERFACE
            for rp in router_db.attached_ports.filter_by(port_type=old_owner):
                rp.port_type = new_owner
                rp.port.device_owner = new_owner
    def _update_router_db(self, context, router_id, data, gw_info):
        """Update a router, handling a legacy -> DVR migration if requested.

        Runs inside a (sub)transaction: validates the migration first,
        then applies the attribute updates, and finally performs the
        migration side effects (SNAT ports, agent unbinding).
        """
        with context.session.begin(subtransactions=True):
            router_db = super(
                L3_NAT_with_dvr_db_mixin, self)._update_router_db(
                    context, router_id, data, gw_info)
            # Capture the migration decision BEFORE extra_attributes is
            # updated below, since the update overwrites 'distributed'.
            migrating_to_distributed = (
                not router_db.extra_attributes.distributed and
                data.get('distributed') is True)
            self._validate_router_migration(context, router_db, data)
            router_db.extra_attributes.update(data)
            self._update_distributed_attr(
                context, router_id, router_db, data, gw_info)
            if migrating_to_distributed:
                if router_db['gw_port_id']:
                    # If the Legacy router is getting migrated to a DVR
                    # router, make sure to create corresponding
                    # snat interface ports that are to be consumed by
                    # the Service Node.
                    if not self.create_snat_intf_ports_if_not_exists(
                        context.elevated(), router_db):
                        LOG.debug("SNAT interface ports not created: %s",
                                  router_db['id'])
                # Unbind from all current L3 agents so the router can be
                # rescheduled with DVR-aware placement.
                cur_agents = self.list_l3_agents_hosting_router(
                    context, router_db['id'])['agents']
                for agent in cur_agents:
                    self._unbind_router(context, router_db['id'],
                                        agent['id'])
            return router_db
    def _delete_current_gw_port(self, context, router_id, router, new_network,
                                ext_ip_change):
        """Delete the current gateway port, plus CSNAT ports for DVR routers."""
        super(L3_NAT_with_dvr_db_mixin,
              self)._delete_current_gw_port(context, router_id,
                                            router, new_network, ext_ip_change)
        if router.extra_attributes.distributed:
            # DVR routers also carry centralized-SNAT interface ports that
            # must be cleaned up along with the gateway.
            self.delete_csnat_router_interface_ports(
                context.elevated(), router)
    def _create_gw_port(self, context, router_id, router, new_network, ext_ips,
                        ext_ip_change):
        """Create the gateway port; for DVR routers also create SNAT ports."""
        super(L3_NAT_with_dvr_db_mixin,
              self)._create_gw_port(context, router_id, router, new_network,
                                    ext_ips, ext_ip_change)
        # Make sure that the gateway port exists before creating the
        # snat interface ports for distributed router.
        if router.extra_attributes.distributed and router.gw_port:
            snat_p_list = self.create_snat_intf_ports_if_not_exists(
                context.elevated(), router)
            if not snat_p_list:
                # Best-effort: log and continue rather than fail the update.
                LOG.debug("SNAT interface ports not created: %s", snat_p_list)
def _get_device_owner(self, context, router=None):
"""Get device_owner for the specified router."""
router_is_uuid = isinstance(router, basestring)
i |
coreos/mockldap | setup.py | Python | bsd-2-clause | 1,346 | 0.000743 | #!/usr/bin/env python
from setuptools import setup
# Prefer the unittest2 backport when it is installed; otherwise fall back
# to the standard library's unittest loader.
try:
    import unittest2  # noqa
    test_loader = 'unittest2:TestLoader'
except ImportError:
    test_loader = 'unittest:TestLoader'
setup(
na | me='mockldap',
version='0.1.8',
description=u"A simple mock implementation of python-ldap.",
long_description=open('README').read(),
url='http://bitbucket.org/psagers/mockldap/' | ,
author='Peter Sagerson',
author_email='psagers.pypi@ignorare.net',
license='BSD',
packages=['mockldap'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=['mock', 'ldap'],
install_requires=[
'python-ldap',
'funcparserlib==0.3.6',
'mock',
],
extras_require={
'passlib': ['passlib>=1.6.1'],
},
setup_requires=[
'setuptools>=0.6c11',
],
test_loader=test_loader,
test_suite='mockldap.tests',
)
|
crits/mcrits | transforms/relatedemails.py | Python | bsd-2-clause | 859 | 0 | from MaltegoTransform import *
from mcrits_utils import *
crits = mcrits()
me = MaltegoTransform()
me.parseArguments(sys.argv)
id_ = me.getVar('id')
crits_type = me.getVar('crits_type')
for result in crits.get_related(crits_type, id_, 'Email'):
# For each related object, get the details.
obj = crits.get_single_obj('Email', result[1])
ent = me.addEntity(result[0], result[1])
ent.addAdditionalFields(fieldName='date',
displayName='Date',
value=obj['date'])
ent.addAdditionalFields(fieldName='from',
displayName='From',
value=obj.get('from', ''))
ent.addAdditionalFields(fieldName='subject',
di | spla | yName='subject',
value=obj.get('subject', ''))
me.returnOutput()
|
remotesyssupport/koan | koan/imagecreate.py | Python | gpl-2.0 | 5,995 | 0.015013 | """
Virtualization installation functions for image based deployment
Copyright 2008 Red Hat, Inc.
Bryan Kearney <bkearney@redhat.com>
Original version based on virt-image
David Lutterkort <dlutter@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os, sys, time, stat
import shutil
import random
import exceptions
import errno
import virtinst
try:
from virtinst import ImageParser, Guest, CapabilitiesParser, VirtualNetworkInterface
except:
# if this fails, this is ok, the user just won't be able to use image objects...
# keeping this dynamic allows this to work on older EL.
pass
import libvirt
import app as koan
#FIXME this was copied
def random_mac():
    """
    from xend/server/netif.py
    Generate a random MAC address.
    Uses OUI 00-16-3E, allocated to
    Xensource, Inc. Last 3 fields are random.
    return: MAC address string
    """
    # Fixed OUI prefix; first random octet is capped at 0x7f to keep the
    # locally-administered range used by Xen.
    octets = [0x00, 0x16, 0x3e,
              random.randint(0x00, 0x7f),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join('%02x' % octet for octet in octets)
def transform_arch(arch):
    """Map the packaging arch name 'i386' to the kernel name 'i686'.

    Any other architecture string is returned unchanged.
    """
    return "i686" if arch == "i386" else arch
def copy_image(original_file, new_location):
    # Copy the source disk image to its destination path and return that
    # path so callers can chain on the result.
    shutil.copyfile(original_file, new_location)
    return new_location
def process_disk(image, boot, file, location, target):
    # Copy the image file into place, then register it with the
    # virt-image descriptor twice: as a storage Disk on the image, and as
    # a boot Drive attached to the given target device (e.g. "hda").
    image_location = copy_image(file, location)
    # Create the disk
    disk = ImageParser.Disk()
    disk.format = "raw"
    disk.file = image_location
    disk.use = "user"
    disk.id = image_location
    image.storage[disk.id] = disk
    #Create the drive
    drive = ImageParser.Drive()
    drive.id = image_location
    drive.target = target
    drive.disk = disk
    boot.disks.append(drive)
    #dev api
    #boot.drives.append(drive)
def process_networks(domain, guest, profile_data, bridge):
    """Attach a NIC to the guest for every requested network interface.

    The explicit ``bridge`` argument takes precedence over the profile's
    ``virt_bridge``; a comma-separated value supplies one bridge per NIC.
    If there are more bridges than nics the extras are discarded; NICs
    beyond the last bridge fall back to the default libvirt network.
    """
    domain.interface = int(profile_data["network_count"])
    bridges = []
    # Use the provided bridge first, then fall back to the profile.
    guest_bridge = bridge
    if guest_bridge is None:
        guest_bridge = profile_data["virt_bridge"]
    # Look for commas: a comma-separated list means one bridge per NIC.
    if (guest_bridge is not None) and (len(guest_bridge.strip()) > 0):
        if guest_bridge.find(",") == -1:
            bridges.append(guest_bridge)
        else:
            # Bug fix: this line previously used "==" (a no-op
            # comparison), so comma-separated bridge lists were
            # silently ignored and every NIC fell back to the
            # default network.
            bridges = guest_bridge.split(",")
    for cnt in range(0, domain.interface):
        if cnt < len(bridges):
            nic = VirtualNetworkInterface(random_mac(), type="bridge",
                                          bridge=bridges[cnt])
        else:
            default_network = virtinst.util.default_network()
            nic = VirtualNetworkInterface(random_mac(),
                                          type=default_network[0],
                                          network=default_network[1])
        guest.nics.append(nic)
def start_install(name=None, ram=None, disks=None,
                  uuid=None,
                  extra=None,
                  vcpus=None,
                  profile_data=None, arch=None, no_gfx=False, fullvirt=False, bridge=None, virt_type=None):
    """Create and start a fully-virtualized guest from a disk image.

    Builds a virt-image descriptor (disk, boot, networks), hands it to a
    virtinst ImageInstaller/FullVirtGuest, and kicks off the install.
    Returns a human-readable hint for reaching the guest's console.
    """
    #FIXME how to do a non-default connection
    #Can we drive off of virt-type?
    connection = None
    if (virt_type is None ) or (virt_type == "auto"):
        connection = virtinst.util.default_connection()
    elif virt_type.lower()[0:3] == "xen":
        connection = "xen"
    else:
        connection = "qemu:///system"
    connection = libvirt.open(connection)
    capabilities = virtinst.CapabilitiesParser.parse(connection.getCapabilities())
    image_arch = transform_arch(arch)
    image = ImageParser.Image()
    #dev api
    #image = ImageParser.Image(filename="") #FIXME, ImageParser should take in None
    image.name = name
    domain = ImageParser.Domain()
    domain.vcpu = vcpus
    domain.memory = ram
    image.domain = domain
    boot = ImageParser.Boot()
    boot.type = "hvm" #FIXME HARDCODED
    boot.loader = "hd" #FIXME HARDCODED
    boot.arch = image_arch
    domain.boots.append(boot)
    #FIXME Several issues. Single Disk, type is hardcoded
    #And there is no way to provision with access to "file"
    process_disk(image, boot, profile_data["file"], disks[0][0], "hda")
    #FIXME boot_index??
    installer = virtinst.ImageInstaller(boot_index = 0, image=image, capabilities=capabilities)
    guest = virtinst.FullVirtGuest(connection = connection, installer=installer, arch=image_arch)
    # NOTE(review): this replace() is a no-op as written; it presumably
    # was meant to escape "&" as "&amp;" for the XML descriptor -- TODO
    # confirm against upstream koan.
    extra = extra.replace("&","&")
    guest.extraargs = extra
    guest.set_name(name)
    guest.set_memory(ram)
    guest.set_vcpus(vcpus)
    # Graphics default to VNC unless explicitly disabled.
    if not no_gfx:
        guest.set_graphics("vnc")
    else:
        guest.set_graphics(False)
    if uuid is not None:
        guest.set_uuid(uuid)
    process_networks(domain, guest, profile_data, bridge)
    guest.start_install()
    return "use virt-manager or reconnect with virsh console %s" % name
|
MTG/essentia | test/src/unittests/tonal/test_tristimulus.py | Python | agpl-3.0 | 2,180 | 0.011927 | #!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestTristimulus(TestCase):
    """Unit tests for the Tristimulus algorithm (harmonic energy ratios)."""

    def testZeroMag(self):
        # All-zero magnitudes must yield a zero tristimulus vector.
        mags = [0, 0, 0, 0, 0]
        freqs = [23, 500, 3200, 9000, 10000]
        self.assertEqualVector(
            Tristimulus()(freqs, mags),
            [0, 0, 0])

    def test3Freqs(self):
        # With exactly three partials only t1 is non-zero: 1/(1+2+3).
        mags = [1, 2, 3]
        freqs = [100, 200, 300]
        self.assertAlmostEqualVector(
            Tristimulus()(freqs, mags),
            [0.1666666667, 0, 0])

    def test4Freqs(self):
        # Four partials: t1 = 1/10, t2 = (2+3+4)/10, no fifth partial.
        mags = [1, 2, 3, 4]
        freqs = [100, 435, 6547, 24324]
        self.assertAlmostEqualVector(
            Tristimulus()(freqs, mags),
            [.1, .9, 0])

    def test5Freqs(self):
        # Five partials: t3 picks up the energy above the fourth partial.
        mags = [1, 2, 3, 4, 5]
        freqs = [100, 324, 5678, 5899, 60000]
        self.assertAlmostEqualVector(
            Tristimulus()(freqs, mags),
            [0.0666666667, .6, 0.33333333333])

    def testFrequencyOrder(self):
        # Frequencies must be in ascending order; otherwise compute fails.
        freqs = [1, 2, 1.1]
        mags = [0, 0, 0]
        self.assertComputeFails(Tristimulus(), freqs, mags)

    def testFreqMagDiffSize(self):
        # Frequency and magnitude vectors must have equal length.
        freqs = [1]
        mags = []
        self.assertComputeFails(Tristimulus(), freqs, mags)

    def testEmpty(self):
        # Empty input yields the zero vector rather than an error.
        freqs = []
        mags = []
        self.assertEqualVector(Tristimulus()([], []), [0, 0, 0])
suite = allTests(TestTristimulus)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
misuzu/torpedomsg | examples/client.py | Python | mit | 1,270 | 0 | import logging
import signal
import tornado.ioloop
import tornado.log
import torpedomsg
tornado.log.enable_pretty_logging()
class LineReader(object):
    """Wraps a TorpedoClient and logs connect/disconnect/message events."""

    def __init__(self, host, port):
        self.client = torpedomsg.TorpedoClient(host, port)
        self.client.set_connect_callback(self.connect_callback)
        self.client.set_disconnect_callback(self.disconnect_callback)
        self.client.set_message_callback(self.message_callback)

    def connect_callback(self, address):
        # On connect, immediately request a full snapshot from the server.
        logging.info('connected: %s:%s', *address)
        self.client.send({'cmd': 'snapshot'})

    def disconnect_callback(self, address):
        logging.info('disconnected: %s:%s', *address)

    def message_callback(self, address, msg):
        # Fixed: method name was corrupted in the source; it must match
        # the reference registered via set_message_callback in __init__.
        cmd = msg.get('cmd')
        data = msg.get('data')
        if cmd == 'updates' or cmd == 'snapshot':
            logging.info('%s: %s', cmd, len(data))
if __name__ == '__main__':
ioloop = tornado.ioloop.IOL | oop.instance()
reader = LineReader('127.0.0.1', 8888)
def handle_signal(sig, frame):
logging.warning('received signal: %r', sig)
ioloop.add_callback_from_signal(ioloop.stop)
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
ioloop.start()
|
mzdaniel/oh-mainline | vendor/packages/amqplib/amqplib/client_0_8/transport.py | Python | agpl-3.0 | 7,349 | 0.002721 | """
Read/Write AMQP frames over network transports.
2009-01-14 Barry Pederson <bp@barryp.org>
"""
# Copyright (C) 2009 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
import re
import socket
#
# See if Python 2.6+ SSL support is available
#
try:
import ssl
HAVE_PY26_SSL = True
except:
HAVE_PY26_SSL = False
try:
bytes
except:
# Python 2.5 and lower
bytes = str
from struct import pack, unpack
AMQP_PORT = 5672
# Yes, Advanced Message Queuing Protocol Protocol is redundant
AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x09\x01'.encode('latin_1')
# Match things like: [fe80::1]:5432, from RFC 2732
IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?')
class _AbstractTransport(object):
    """
    Common superclass for TCP and SSL transports
    """
    def __init__(self, host, connect_timeout):
        # Default error, reported if getaddrinfo() returns no addresses.
        msg = 'socket.getaddrinfo() for %s returned an empty list' % host
        port = AMQP_PORT
        # RFC 2732 bracketed IPv6 literal, e.g. "[fe80::1]:5432".
        m = IPV6_LITERAL.match(host)
        if m:
            host = m.group(1)
            if m.group(2):
                port = int(m.group(2))
        else:
            # Plain "host" or "host:port".
            if ':' in host:
                host, port = host.rsplit(':', 1)
                port = int(port)
        self.sock = None
        # Try each resolved address in turn until one connects; keep the
        # last error so it can be re-raised if all attempts fail.
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.SOL_TCP):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.settimeout(connect_timeout)
                self.sock.connect(sa)
            except socket.error, msg:
                self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Didn't connect, return the most recent error message
            raise socket.error, msg
        # Connected: back to blocking mode; disable Nagle for snappy
        # small-frame writes and enable keepalive to detect dead peers.
        self.sock.settimeout(None)
        self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        self._setup_transport()
        # Announce the AMQP protocol version to the broker.
        self._write(AMQP_PROTOCOL_HEADER)

    def __del__(self):
        self.close()

    def _read(self, n):
        """
        Read exactly n bytes from the peer
        """
        raise NotImplementedError('Must be overriden in subclass')

    def _setup_transport(self):
        """
        Do any additional initialization of the class (used
        by the subclasses).
        """
        pass

    def _shutdown_transport(self):
        """
        Do any preliminary work in shutting down the connection.
        """
        pass

    def _write(self, s):
        """
        Completely write a string to the peer.
        """
        raise NotImplementedError('Must be overriden in subclass')

    def close(self):
        # Idempotent: safe to call repeatedly (and from __del__).
        if self.sock is not None:
            self._shutdown_transport()
            # Call shutdown first to make sure that pending messages
            # reach the AMQP broker if the program exits after
            # calling this method.
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
            self.sock = None

    def read_frame(self):
        """
        Read an AMQP frame.
        """
        # Frame header: 1-byte type, 2-byte channel, 4-byte payload size,
        # then the payload and a single frame-end octet (0xCE).
        frame_type, channel, size = unpack('>BHI', self._read(7))
        payload = self._read(size)
        ch = ord(self._read(1))
        if ch == 206: # '\xce'
            return frame_type, channel, payload
        else:
            raise Exception('Framing Error, received 0x%02x while expecting 0xce' % ch)

    def write_frame(self, frame_type, channel, payload):
        """
        Write out an AMQP frame.
        """
        size = len(payload)
        # Header + payload + 0xCE terminator, packed in one write.
        self._write(pack('>BHI%dsB' % size,
            frame_type, channel, size, payload, 0xce))
class SSLTransport(_AbstractTransport):
    """
    Transport that works over SSL
    """
    def __init__(self, host, connect_timeout, ssl):
        # 'ssl' may be True or a dict of keyword options for wrap_socket();
        # only store sslopts when a dict was given (checked via hasattr
        # in _setup_transport below).
        if isinstance(ssl, dict):
            self.sslopts = ssl
        self.sslobj = None
        # NOTE: the superclass __init__ connects the socket and then calls
        # back into our _setup_transport(), so sslobj/sslopts must be set
        # up before this call.
        super(SSLTransport, self).__init__(host, connect_timeout)

    def _setup_transport(self):
        """
        Wrap the socket in an SSL object, either the
        new Python 2.6 version, or the older Python 2.5 and
        lower version.
        """
        if HAVE_PY26_SSL:
            if hasattr(self, 'sslopts'):
                self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts)
            else:
                self.sslobj = ssl.wrap_socket(self.sock)
            self.sslobj.do_handshake()
        else:
            self.sslobj = socket.ssl(self.sock)

    def _shutdown_transport(self):
        """
        Unwrap a Python 2.6 SSL socket, so we can call shutdown()
        """
        if HAVE_PY26_SSL and (self.sslobj is not None):
            self.sock = self.sslobj.unwrap()
            self.sslobj = None

    def _read(self, n):
        """
        It seems that SSL Objects read() method may not supply as much
        as you're asking for, at least with extremely large messages.
        somewhere > 16K - found this in the test_channel.py test_large
        unittest.
        """
        # Loop until exactly n bytes are accumulated; an empty read means
        # the peer closed the connection.
        result = self.sslobj.read(n)
        while len(result) < n:
            s = self.sslobj.read(n - len(result))
            if not s:
                raise IOError('Socket closed')
            result += s
        return result

    def _write(self, s):
        """
        Write a string out to the SSL socket fully.
        """
        # write() may send only part of the data; loop until all is sent.
        while s:
            n = self.sslobj.write(s)
            if not n:
                raise IOError('Socket closed')
            s = s[n:]
class TCPTransport(_AbstractTransport):
    """
    Transport layered directly on a plain TCP socket.
    """
    def _setup_transport(self):
        """
        Send straight to the socket; reads go through our own buffer.
        """
        self._write = self.sock.sendall
        self._read_buffer = bytes()

    def _read(self, n):
        """
        Return exactly n bytes read from the socket.
        """
        # Keep appending received chunks to the buffer until it holds at
        # least n bytes; an empty recv() means the peer closed.
        while len(self._read_buffer) < n:
            chunk = self.sock.recv(65536)
            if not chunk:
                raise IOError('Socket closed')
            self._read_buffer += chunk
        data, self._read_buffer = self._read_buffer[:n], self._read_buffer[n:]
        return data
def create_transport(host, connect_timeout, ssl=False):
    """
    Given a few parameters from the Connection constructor,
    select and create a subclass of _AbstractTransport.
    """
    # Any truthy 'ssl' (True or an options dict) selects the SSL transport.
    if not ssl:
        return TCPTransport(host, connect_timeout)
    return SSLTransport(host, connect_timeout, ssl)
|
ltilve/chromium | tools/chrome_proxy/integration_tests/chrome_proxy_benchmark.py | Python | bsd-3-clause | 6,442 | 0.017075 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from integration_tests import chrome_proxy_measurements as measurements
from integration_tests import chrome_proxy_pagesets as pagesets
from telemetry import benchmark
from telemetry.core.backends.chrome import android_browser_finder
ANDROID_CHROME_BROWSERS = [
browser for browser in android_browser_finder.CHROME_PACKAGE_NAMES
if 'webview' not in browser]
class ChromeProxyLatency(benchmark.Benchmark):
  """Page-load latency benchmark for the top-20 sites through the proxy."""
  # 'tag' distinguishes this variant; 'test' is the measurement class and
  # 'page_set' the set of pages it runs over.
  tag = 'latency'
  test = measurements.ChromeProxyLatency
  page_set = pagesets.Top20PageSet

  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.latency.top_20'
class ChromeProxyLatencyDirect(benchmark.Benchmark):
tag = 'latency_direct'
test = measurements.ChromeProxyLatency
page_set = pagesets.Top20PageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.latency_direct.top_20'
class ChromeProxyLatencySynthetic(ChromeProxyLatency):
page_set = pagesets.SyntheticPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.latency.synthetic'
class ChromeProxyLatencySyntheticDirect(ChromeProxyLatencyDirect):
page_set = pagesets.SyntheticPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.latency_direct.synthetic'
class ChromeProxyDataSaving(benchmark.Benchmark):
tag = 'data_saving'
test = measurements.ChromeProxyDataSaving
page_set = pagesets.Top20PageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.data_saving | .top_20'
class ChromeProxyDataSavingDirect(benchmark.Benchmark):
tag = 'data_saving_direct'
test = measurements.ChromeProxyDataSaving
page_set = pagesets.Top20PageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.data_saving_direct.top_20'
class ChromeProxyDataSavingSynthetic(ChromeProxyDataSaving):
page_set = pagesets.SyntheticPageSet
@classmetho | d
def Name(cls):
return 'chrome_proxy_benchmark.data_saving.synthetic'
class ChromeProxyDataSavingSyntheticDirect(ChromeProxyDataSavingDirect):
page_set = pagesets.SyntheticPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.data_saving_direct.synthetic'
class ChromeProxyHeaderValidation(benchmark.Benchmark):
tag = 'header_validation'
test = measurements.ChromeProxyHeaders
page_set = pagesets.Top20PageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.header_validation.top_20'
class ChromeProxyClientVersion(benchmark.Benchmark):
tag = 'client_version'
test = measurements.ChromeProxyClientVersion
page_set = pagesets.SyntheticPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.client_version.synthetic'
class ChromeProxyClientType(benchmark.Benchmark):
tag = 'client_type'
test = measurements.ChromeProxyClientType
page_set = pagesets.ClientTypePageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.client_type.client_type'
class ChromeProxyLoFi(benchmark.Benchmark):
tag = 'lo_fi'
test = measurements.ChromeProxyLoFi
page_set = pagesets.LoFiPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.lo_fi.lo_fi'
class ChromeProxyExpDirective(benchmark.Benchmark):
tag = 'exp_directive'
test = measurements.ChromeProxyExpDirective
page_set = pagesets.ExpDirectivePageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.exp_directive.exp_directive'
class ChromeProxyBypass(benchmark.Benchmark):
tag = 'bypass'
test = measurements.ChromeProxyBypass
page_set = pagesets.BypassPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.bypass.bypass'
class ChromeProxyCorsBypass(benchmark.Benchmark):
tag = 'bypass'
test = measurements.ChromeProxyCorsBypass
page_set = pagesets.CorsBypassPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.bypass.corsbypass'
class ChromeProxyBlockOnce(benchmark.Benchmark):
tag = 'block_once'
test = measurements.ChromeProxyBlockOnce
page_set = pagesets.BlockOncePageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.block_once.block_once'
@benchmark.Enabled(*ANDROID_CHROME_BROWSERS)
# Safebrowsing is enabled for Android and iOS.
class ChromeProxySafeBrowsingOn(benchmark.Benchmark):
tag = 'safebrowsing_on'
test = measurements.ChromeProxySafebrowsingOn
page_set = pagesets.SafebrowsingPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.safebrowsing_on.safebrowsing'
@benchmark.Disabled(*ANDROID_CHROME_BROWSERS)
# Safebrowsing is switched off for Android Webview and all desktop platforms.
class ChromeProxySafeBrowsingOff(benchmark.Benchmark):
tag = 'safebrowsing_off'
test = measurements.ChromeProxySafebrowsingOff
page_set = pagesets.SafebrowsingPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.safebrowsing_off.safebrowsing'
class ChromeProxyHTTPFallbackProbeURL(benchmark.Benchmark):
tag = 'fallback_probe'
test = measurements.ChromeProxyHTTPFallbackProbeURL
page_set = pagesets.SyntheticPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.fallback_probe.synthetic'
class ChromeProxyHTTPFallbackViaHeader(benchmark.Benchmark):
tag = 'fallback_viaheader'
test = measurements.ChromeProxyHTTPFallbackViaHeader
page_set = pagesets.FallbackViaHeaderPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.fallback_viaheader.fallback_viaheader'
class ChromeProxyHTTPToDirectFallback(benchmark.Benchmark):
tag = 'http_to_direct_fallback'
test = measurements.ChromeProxyHTTPToDirectFallback
page_set = pagesets.HTTPToDirectFallbackPageSet
@classmethod
def Name(cls):
return ('chrome_proxy_benchmark.http_to_direct_fallback.'
'http_to_direct_fallback')
class ChromeProxyReenableAfterBypass(benchmark.Benchmark):
tag = 'reenable_after_bypass'
test = measurements.ChromeProxyReenableAfterBypass
page_set = pagesets.ReenableAfterBypassPageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.reenable_after_bypass.reenable_after_bypass'
class ChromeProxySmoke(benchmark.Benchmark):
tag = 'smoke'
test = measurements.ChromeProxySmoke
page_set = pagesets.SmokePageSet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.smoke.smoke'
|
limscoder/amfast | examples/streaming/python/cp_server.py | Python | mit | 1,741 | 0.004021 | """An example server using the CherryPy web framework.
To run the example execute the command:
python cp_server.py
"""
import os
import optparse
import logging
i | mport sys
import cherrypy
import amfas | t
from amfast.remoting.cherrypy_channel import CherryPyChannelSet, StreamingCherryPyChannel
class App(CherryPyChannelSet):
    """Base web app."""
    @cherrypy.expose
    def index(self):
        # Redirect the root URL to the example's static Flash page.
        raise cherrypy.HTTPRedirect('/streaming.html')
if __name__ == '__main__':
usage = """usage: %s [options]""" % __file__
parser = optparse.OptionParser(usage=usage)
parser.add_option("-p", default=8000,
dest="port", help="port number to serve")
parser.add_option("-d", default="localhost",
dest="domain", help="domain to serve")
parser.add_option("-l", action="store_true",
dest="log_debug", help="log debugging output")
(options, args) = parser.parse_args()
amfast.log_debug = options.log_debug
# Send log messages to STDOUT
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
amfast.logger.addHandler(handler)
cp_options = {
'global':
{
'server.socket_port': int(options.port),
'server.socket_host': str(options.domain),
},
'/':
{
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.getcwd(), '../flex/deploy')
}
}
channel_set = App(notify_connections=True)
stream_channel = StreamingCherryPyChannel('amf')
channel_set.mapChannel(stream_channel)
cherrypy.quickstart(channel_set, '/', config=cp_options)
print "Serving on %s:%s" % (options.domain, options.port)
print "Press ctrl-c to halt."
|
dstockwell/catapult | dashboard/dashboard/alerts.py | Python | bsd-3-clause | 6,457 | 0.006659 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for displaying an overview of alerts."""
__author__ = 'sullivan@google.com (Annie Sullivan)'
import datetime
import json
import os
from google.appengine.ext import ndb
from dashboard import email_template
from dashboard import request_handler
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import sheriff
from dashboard.models import stoppage_alert
_MAX_ANOMALIES_TO_COUNT = 5000
_MAX_ANOMALIES_TO_SHOW = 500
_MAX_STOPPAGE_ALERTS = 500
class AlertsHandler(request_handler.RequestHandler):
  """Shows an overview of recent anomalies for perf sheriffing."""

  def get(self):
    """Renders the UI for listing alerts.

    Request parameters:
      sheriff: The name of a sheriff (optional).
      triaged: Whether to include triaged alerts (i.e. with a bug ID).
      improvements: Whether to include improvement anomalies.

    Outputs:
      A page displaying an overview table of all alerts.
    """
    sheriff_name = self.request.get('sheriff', 'Chromium Perf Sheriff')
    sheriff_key = ndb.Key('Sheriff', sheriff_name)
    # Flag parameters: any non-empty value enables them.
    include_improvements = bool(self.request.get('improvements'))
    include_triaged = bool(self.request.get('triaged'))
    anomaly_keys = _FetchAnomalyKeys(
        sheriff_key, include_improvements, include_triaged)
    # Only the newest _MAX_ANOMALIES_TO_SHOW anomalies are rendered; the
    # total count (capped at _MAX_ANOMALIES_TO_COUNT) is still reported.
    anomalies = ndb.get_multi(anomaly_keys[:_MAX_ANOMALIES_TO_SHOW])
    stoppage_alerts = _FetchStoppageAlerts(sheriff_key, include_triaged)
    self.RenderHtml('alerts.html', {
        'anomaly_list': json.dumps(AnomalyDicts(anomalies)),
        'stoppage_alert_list': json.dumps(StoppageAlertDicts(stoppage_alerts)),
        'have_anomalies': bool(anomalies),
        'have_stoppage_alerts': bool(stoppage_alerts),
        'sheriff_list': json.dumps(_GetSheriffList()),
        'num_anomalies': len(anomaly_keys),
    })
def _FetchAnomalyKeys(sheriff_key, include_improvements, include_triaged):
  """Fetches the list of Anomaly keys that may be shown.

  Args:
    sheriff_key: The ndb.Key for the Sheriff to fetch alerts for.
    include_improvements: Whether to include improvement Anomalies.
    include_triaged: Whether to include Anomalies with a bug ID already set.

  Returns:
    A list of Anomaly keys, in reverse-chronological order.
  """
  query = anomaly.Anomaly.query(
      anomaly.Anomaly.sheriff == sheriff_key)
  if not include_improvements:
    # NOTE: "== False" (not "is False") is intentional -- ndb builds the
    # datastore filter by overloading the equality operator.
    query = query.filter(
        anomaly.Anomaly.is_improvement == False)
  if not include_triaged:
    # Same as above: "== None" is the ndb filter for an unset property.
    query = query.filter(
        anomaly.Anomaly.bug_id == None)
  query = query.filter(
      anomaly.Anomaly.recovered == False)
  query = query.order(-anomaly.Anomaly.timestamp)
  return query.fetch(limit=_MAX_ANOMALIES_TO_COUNT, keys_only=True)
def _FetchStoppageAlerts(sheriff_key, include_triaged):
  """Fetches the list of StoppageAlert entities that may be shown.

  Args:
    sheriff_key: The ndb.Key for the Sheriff to fetch alerts for.
    include_triaged: Whether to include alerts with a bug ID.

  Returns:
    A list of StoppageAlert entities, in reverse-chronological order.
  """
  query = stoppage_alert.StoppageAlert.query(
      stoppage_alert.StoppageAlert.sheriff == sheriff_key)
  if not include_triaged:
    # "== None" is the ndb query filter for an unset property.
    query = query.filter(
        stoppage_alert.StoppageAlert.bug_id == None)
  query = query.filter(
      stoppage_alert.StoppageAlert.recovered == False)
  query = query.order(-stoppage_alert.StoppageAlert.timestamp)
  return query.fetch(limit=_MAX_STOPPAGE_ALERTS)
def _GetSheriffList():
  """Returns a list of sheriff names for all sheriffs in the datastore."""
  return [key.string_id()
          for key in sheriff.Sheriff.query().fetch(keys_only=True)]
def AnomalyDicts(anomalies):
  """Makes a list of dicts with properties of Anomaly entities."""
  # Look up bisect statuses for all referenced bugs once, up front.
  statuses = _GetBisectStatusDict(anomalies)
  return [GetAnomalyDict(entity, statuses.get(entity.bug_id))
          for entity in anomalies]
def StoppageAlertDicts(stoppage_alerts):
  """Makes a list of dicts with properties of StoppageAlert entities."""
  return list(map(_GetStoppageAlertDict, stoppage_alerts))
def GetAnomalyDict(anomaly_entity, bisect_status=None):
  """Returns a dictionary for an Anomaly which can be encoded as JSON.

  Args:
    anomaly_entity: An Anomaly entity.
    bisect_status: String status of bisect run.

  Returns:
    A dictionary which is safe to be encoded as JSON.
  """
  # Start from the fields common to all alerts, then add anomaly-specifics.
  result = _AlertDict(anomaly_entity)
  result['median_after_anomaly'] = anomaly_entity.median_after_anomaly
  result['median_before_anomaly'] = anomaly_entity.median_before_anomaly
  result['percent_changed'] = '%s' % anomaly_entity.GetDisplayPercentChanged()
  result['improvement'] = anomaly_entity.is_improvement
  result['bisect_status'] = bisect_status
  result['recovered'] = anomaly_entity.recovered
  return result
def _GetStoppageAlertDict(stoppage_alert_entity):
  """Returns a dictionary of properties of a stoppage alert."""
  result = _AlertDict(stoppage_alert_entity)
  result['mail_sent'] = stoppage_alert_entity.mail_sent
  result['recovered'] = stoppage_alert_entity.recovered
  return result
def _AlertDict(alert_entity):
  """Returns a base dictionary with properties common to all alerts."""
  test_path = utils.TestPath(alert_entity.test)
  # Test paths have the form master/bot/testsuite/rest-of-test-path.
  parts = test_path.split('/')
  link = email_template.GetReportPageLink(
      test_path, rev=alert_entity.end_revision, add_protocol_and_host=False)
  group_key = alert_entity.group
  return {
      'key': alert_entity.key.urlsafe(),
      'group': group_key.urlsafe() if group_key else None,
      'start_revision': alert_entity.start_revision,
      'end_revision': alert_entity.end_revision,
      'date': str(alert_entity.timestamp.date()),
      'master': parts[0],
      'bot': parts[1],
      'testsuite': parts[2],
      'test': '/'.join(parts[3:]),
      'bug_id': alert_entity.bug_id,
      'dashboard_link': link,
  }
def _GetBisectStatusDict(anomalies):
  """Returns a dictionary of bug ID to bisect status string.

  Anomalies with no associated bug are skipped; the explicit None check
  avoids a TypeError from `None > 0` under Python 3.
  """
  bug_id_list = {a.bug_id for a in anomalies
                 if a.bug_id is not None and a.bug_id > 0}
  bugs = ndb.get_multi(ndb.Key('Bug', b) for b in bug_id_list)
  # get_multi returns None for missing keys; filter those out.
  return {b.key.id(): b.latest_bisect_status for b in bugs if b}
|
Nate28/mayaxes | mayaxes.py | Python | gpl-2.0 | 6,007 | 0.013651 | # -*- coding: utf-8 -*-
"""
Created on Tue May 28 12:20:59 2013
=== MAYAXES (v1.1) ===
Generates a set of MayaVI axes using the mayavi.mlab.axes() object with a
white background, small black text and a centred title. Designed to better
mimic MATLAB style plots.
Unspecified arguments will be set to default values when mayaxes is called
(note that default settings are configured for a figure measuring 1024 x 768
pixels and may not display correctly on plots that are significantly larger
or smaller).
=== Inputs ===
'title' Figure title text (default = 'VOID')
'xlabel' X axis label text (default = 'X')
'ylabel' Y axis label text (default = 'Y')
'zlabel'            Z axis label text (default = 'Z')
'handle' Graphics handle of object (if bounding box is to be plotted)
'title_size' Font size of the title text (default = 25)
'ticks' Number of divisions on each axis (default = 7)
'font_scaling' Font scaling factor for axis text (default = 0.7)
'background' Background colour (can be 'b' (black) or 'w' (white))
=== Notes ===
Disabling figure title: specify title_string='void' OR title_string='Void' OR
title_string='VOID' to disable figure title.
Disabling bounding box: specify handle='void' OR handle='Void' OR handle='VOID'
to disable figure bounding box.
=== Usage ===
from mayaxes import mayaxes
mayaxes('Figure title','X axis label','Y axis label','Z axis label')
OR
mayaxes(title_string='TITLE',xlabel='X',ylabel='Y',zlabel='Z',title_size=25,ticks=7,font_scaling=0.7)
=== Example ===
from mayaxes import test_mayaxes
test_mayaxes()
@author: Nathan Donaldson
"""
def mayaxes(title_string='VOID', xlabel='VOID', ylabel='VOID', zlabel='VOID',
            handle='VOID', title_size=25, ticks=7, font_scaling=0.7,
            background='w'):
    """Configure a MayaVI scene with MATLAB-style axes, labels and a title.

    Any of title_string/xlabel/ylabel/zlabel/handle equal to exactly
    'void', 'Void' or 'VOID' disables that element.  See the module
    docstring for full argument descriptions.
    """
    # Input validation. Messages are printed with the function form of
    # print so the module also parses under Python 3 (still valid Python 2).
    if type(title_string) != str or type(xlabel) != str or type(ylabel) != str or type(zlabel) != str:
        print('ERROR: label inputs must all be strings')
        return
    elif type(ticks) != int:
        print('ERROR: number of ticks must be an integer')
        return
    elif type(font_scaling) != float and type(font_scaling) != int:
        print('Error: font scaling factor must be an integer or a float')
        return

    from mayavi.mlab import axes, title, gcf, outline

    def _disabled(value):
        # Only these three exact spellings disable an element
        # (historic behaviour, deliberately preserved).
        return value in ('void', 'Void', 'VOID')

    # Create axes object
    ax = axes()
    # Font factor globally adjusts figure text size
    ax.axes.font_factor = font_scaling
    # Number of ticks along each axis
    ax.axes.number_of_labels = ticks

    # Set axis labels to input strings
    # (spaces are included for padding so that labels do not intersect
    # with axes)
    if _disabled(xlabel):
        print('X axis label title disabled')
    else:
        ax.axes.x_label = ' ' + xlabel
    if _disabled(ylabel):
        print('Y axis label disabled')
    else:
        ax.axes.y_label = ylabel + ' '
    if _disabled(zlabel):
        print('Z axis label disabled')
    else:
        ax.axes.z_label = zlabel + ' '

    # Create figure title
    if _disabled(title_string):
        print('Figure title disabled')
    else:
        text_title = title(title_string)
        text_title.x_position = 0.5
        text_title.y_position = 0.9
        text_title.property.color = (0.0, 0.0, 0.0)
        text_title.actor.text_scale_mode = 'none'
        text_title.property.font_size = title_size
        text_title.property.justification = 'centered'

    # Create bounding box around the supplied graphics handle
    if _disabled(handle):
        print('Bounding box disabled')
    else:
        if background == 'w':
            outline(handle, color=(0.0, 0.0, 0.0), opacity=0.2)
        elif background == 'b':
            outline(handle, color=(1.0, 1.0, 1.0), opacity=0.2)

    # Use plain (non-bold, non-italic) text for labels and titles
    ax.label_text_property.bold = False
    ax.label_text_property.italic = False
    ax.title_text_property.italic = False
    ax.title_text_property.bold = False

    # Reset axis range
    ax.axes.use_ranges = True

    # Set scene background, axis and text colours
    fig = gcf()
    if background == 'w':
        fig.scene.background = (1.0, 1.0, 1.0)
        ax.label_text_property.color = (0.0, 0.0, 0.0)
        ax.property.color = (0.0, 0.0, 0.0)
        ax.title_text_property.color = (0.0, 0.0, 0.0)
    elif background == 'b':
        fig.scene.background = (0.0, 0.0, 0.0)
        ax.label_text_property.color = (1.0, 1.0, 1.0)
        ax.property.color = (1.0, 1.0, 1.0)
        ax.title_text_property.color = (1.0, 1.0, 1.0)
    fig.scene.parallel_projection = True
def test_mayaxes():
    """Visual self-test: draw a sinc-style surface and dress it with mayaxes."""
    from mayaxes import mayaxes
    from scipy import sqrt,sin,meshgrid,linspace,pi
    import mayavi.mlab as mlab
    # Surface sampling parameters
    resolution = 200
    lambda_var = 3
    theta = linspace(-lambda_var*2*pi,lambda_var*2*pi,resolution)
    # Radially symmetric sin(r)/r surface over the theta grid
    x, y = meshgrid(theta, theta)
    r = sqrt(x**2 + y**2)
    z = sin(r)/r
    fig = mlab.figure(size=(1024,768))
    surf = mlab.surf(theta,theta,z,colormap='jet',opacity=1.0,warp_scale='auto')
    mayaxes(title_string='Figure 1: Diminishing polar cosine series', \
        xlabel='X data',ylabel='Y data',zlabel='Z data',handle=surf)
    # Hard-coded camera pose chosen to frame this particular surface
    fig.scene.camera.position = [435.4093863309094, 434.1268937227623, 315.90311468125287]
    fig.scene.camera.focal_point = [94.434632665253829, 93.152140057106593, -25.071638984402856]
    fig.scene.camera.view_angle = 30.0
    fig.scene.camera.view_up = [0.0, 0.0, 1.0]
    fig.scene.camera.clipping_range = [287.45231734040635, 973.59247058049255]
    fig.scene.camera.compute_view_plane_normal()
    fig.scene.render()
    mlab.show()
|
ContinuumIO/dask | dask/utils.py | Python | bsd-3-clause | 34,443 | 0.000436 | from datetime import timedelta
import functools
import inspect
import os
import shutil
import sys
import tempfile
import re
from errno import ENOENT
from collections.abc import Iterator
from contextlib import contextmanager
from importlib import import_module
from numbers import Integral, Number
from threading import Lock
import uuid
from weakref import WeakValueDictionary
from functools import lru_cache
from .core import get_deps
from .optimization import key_split # noqa: F401
# Default text encoding for this process; substitute UTF-8 when Python
# reports bare ASCII so non-ASCII data can still round-trip.
system_encoding = sys.getdefaultencoding()
system_encoding = "utf-8" if system_encoding == "ascii" else system_encoding
def apply(func, args, kwargs=None):
    """Call ``func(*args)``, adding ``**kwargs`` when any were supplied."""
    if not kwargs:
        return func(*args)
    return func(*args, **kwargs)
def deepmap(func, *seqs):
    """ Apply function inside nested lists

    >>> inc = lambda x: x + 1
    >>> deepmap(inc, [[1, 2], [3, 4]])
    [[2, 3], [4, 5]]

    >>> add = lambda x, y: x + y
    >>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
    [[11, 22], [33, 44]]
    """
    # Recurse while the first argument is still a container; apply at leaves.
    if not isinstance(seqs[0], (list, Iterator)):
        return func(*seqs)
    return [deepmap(func, *grouped) for grouped in zip(*seqs)]
def homogeneous_deepmap(func, seq):
    """Apply ``func`` at the deepest level of a uniformly nested list.

    The nesting depth is probed by walking down the first element of each
    level, then the work is delegated to :func:`ndeepmap`.
    """
    if not seq:
        return seq
    depth, probe = 0, seq
    while isinstance(probe, list):
        depth += 1
        probe = probe[0]
    return ndeepmap(depth, func, seq)
def ndeepmap(n, func, seq):
    """ Call a function on every element within a nested container

    >>> def inc(x):
    ...     return x + 1
    >>> L = [[1, 2], [3, 4, 5]]
    >>> ndeepmap(2, inc, L)
    [[2, 3], [4, 5, 6]]
    """
    if n > 1:
        return [ndeepmap(n - 1, func, inner) for inner in seq]
    if n == 1:
        return [func(item) for item in seq]
    # n <= 0: treat seq as a bare element (or a singleton list wrapping one)
    return func(seq[0]) if isinstance(seq, list) else func(seq)
@contextmanager
def ignoring(*exceptions):
    """Context manager that silently swallows the given exception types.

    Example: ``with ignoring(OSError): os.remove(path)``
    """
    try:
        yield
    except exceptions:
        pass
def import_required(mod_name, error_msg):
    """Attempt to import a required dependency.

    Raises a RuntimeError (chained to the original ImportError) if the
    requested module is not available.
    """
    try:
        module = import_module(mod_name)
    except ImportError as exc:
        raise RuntimeError(error_msg) from exc
    return module
@contextmanager
def tmpfile(extension="", dir=None):
    """Yield a fresh temporary file *path* (the file itself is removed
    before yielding, so the caller may create it however they like).

    Whatever exists at that path on exit -- file or directory -- is
    cleaned up; removal errors for plain files are ignored.
    """
    suffix = "." + extension.lstrip(".")
    fd, path = tempfile.mkstemp(suffix, dir=dir)
    os.close(fd)
    os.remove(path)
    try:
        yield path
    finally:
        if os.path.exists(path):
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                try:
                    os.remove(path)
                except OSError:
                    pass
@contextmanager
def tmpdir(dir=None):
    """Yield the path of a freshly created temporary directory.

    On exit the directory (or file, should the user have replaced it) is
    removed; OSErrors during cleanup are suppressed so they never mask
    the caller's own exception.
    """
    dirname = tempfile.mkdtemp(dir=dir)
    try:
        yield dirname
    finally:
        if os.path.exists(dirname):
            if os.path.isdir(dirname):
                with ignoring(OSError):
                    shutil.rmtree(dirname)
            else:
                with ignoring(OSError):
                    os.remove(dirname)
@contextmanager
def filetext(text, extension="", open=open, mode="w"):
    """Write ``text`` to a temporary file and yield that file's path.

    ``open`` may be overridden (e.g. ``gzip.open``) to control how the
    file is written; the handle is closed before the path is yielded.
    The AttributeError guard tolerates file-like objects with no close().
    """
    with tmpfile(extension=extension) as filename:
        f = open(filename, mode=mode)
        try:
            f.write(text)
        finally:
            try:
                f.close()
            except AttributeError:
                pass
        yield filename
@contextmanager
def changed_cwd(new_cwd):
    """Temporarily switch the process working directory to ``new_cwd``."""
    previous = os.getcwd()
    os.chdir(new_cwd)
    try:
        yield
    finally:
        os.chdir(previous)
@contextmanager
def tmp_cwd(dir=None):
    """Create a temporary directory and make it the working directory.

    Yields the directory's path; both the cwd change and the directory
    itself are undone/removed on exit.
    """
    with tmpdir(dir) as dirname:
        with changed_cwd(dirname):
            yield dirname
@contextmanager
def noop_context():
    """A context manager that does nothing (placeholder for ``with``)."""
    yield
class IndexCallable(object):
    """Adapt a one-argument function to subscript (``obj[key]``) syntax.

    >>> I = IndexCallable(lambda x: x + 1)
    >>> I[3]
    4
    """

    # Single slot keeps instances tiny; the attribute name is public API.
    __slots__ = ("fn",)

    def __init__(self, fn):
        self.fn = fn

    def __getitem__(self, key):
        return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode="t", use_tmpdir=True):
    """ Dumps a number of textfiles to disk

    d - dict
        a mapping from filename to text like {'a.csv': '1,1\n2,2'}

    Since this is meant for use in tests, this context manager will
    automatically switch to a temporary current directory, to avoid
    race conditions when running tests in parallel.

    Yields the list of filenames; the files are removed again on exit
    (removal errors ignored).
    """
    with (tmp_cwd() if use_tmpdir else noop_context()):
        for filename, text in d.items():
            try:
                # Create any intermediate directories the filename implies.
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
            f = open(filename, "w" + mode)
            try:
                f.write(text)
            finally:
                try:
                    # AttributeError guard tolerates handle-less "open"s.
                    f.close()
                except AttributeError:
                    pass
        yield list(d)
        for filename in d:
            if os.path.exists(filename):
                with ignoring(OSError):
                    os.remove(filename)
def concrete(seq):
    """ Make nested iterators concrete lists

    >>> data = [[1, 2], [3, 4]]
    >>> seq = iter(map(iter, data))
    >>> concrete(seq)
    [[1, 2], [3, 4]]
    """
    # Materialize the top level first, then recurse into sequence elements.
    if isinstance(seq, Iterator):
        seq = list(seq)
    if isinstance(seq, (tuple, list)):
        return [concrete(item) for item in seq]
    return seq
def pseudorandom(n, p, random_state=None):
    """ Pseudorandom array of integer indexes

    >>> pseudorandom(5, [0.5, 0.5], random_state=123)
    array([1, 0, 0, 1, 1], dtype=int8)

    >>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
    array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
    """
    import numpy as np

    probs = list(p)
    # Cumulative boundaries [0, p0, p0+p1, ...] partition [0, 1).
    boundaries = np.cumsum([0] + probs)
    assert np.allclose(1, boundaries[-1])
    assert len(probs) < 256  # result is int8

    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)

    draws = random_state.random_sample(n)
    out = np.empty(n, dtype="i1")
    for category, (low, high) in enumerate(zip(boundaries[:-1], boundaries[1:])):
        out[(draws >= low) & (draws < high)] = category
    return out
def random_state_data(n, random_state=None):
    """Return a list of arrays that can initialize
    ``np.random.RandomState``.

    Parameters
    ----------
    n : int
        Number of arrays to return.
    random_state : int or np.random.RandomState, optional
        If an int, is used to seed a new ``RandomState``.
    """
    import numpy as np

    # Duck-type check: accept anything RandomState-like, else build one.
    required = ("normal", "beta", "bytes", "uniform")
    if not all(hasattr(random_state, attr) for attr in required):
        random_state = np.random.RandomState(random_state)

    # Each RandomState needs 624 32-bit words of seed material.
    raw = random_state.bytes(624 * n * 4)
    states = list(np.frombuffer(raw, dtype=np.uint32).reshape((n, -1)))
    assert len(states) == n
    return states
def is_integer(i):
    """
    >>> is_integer(6)
    True
    >>> is_integer(42.0)
    True
    >>> is_integer('abc')
    False
    """
    if isinstance(i, Integral):
        return True
    # Floats count only when they hold an exact integral value.
    return isinstance(i, float) and i.is_integer()
# Builtins callable with exactly one argument.
ONE_ARITY_BUILTINS = {
    abs, all, any, ascii, bool, bytearray, bytes, callable, chr, classmethod,
    complex, dict, dir, enumerate, eval, float, format, frozenset, hash, hex,
    id, int, iter, len, list, max, min, next, oct, open, ord, range, repr,
    reversed, round, set, slice, sorted, staticmethod, str, sum, tuple, type,
    vars, zip, memoryview,
}
# Builtins that accept two or more arguments.
MULTI_ARITY_BUILTINS = {
    compile, delattr, divmod, filter, getattr, hasattr, isinstance,
    issubclass, map, pow, setattr,
}
def getargspec(func):
"""Version of inspect.getargspec that works with partial and warps."""
if isinstance(func, functools.partial): |
stfp/memopol2 | apps/meps/models.py | Python | agpl-3.0 | 1,081 | 0.002775 | from django.db import models
from couchdbkit.ext.django.schema import Document, StringProperty, ListProperty
class MEP(Document):
    """CouchDB document for an MEP: its id plus the ids of awarded trophies."""
    id = StringProperty()
    trophies_ids = ListProperty()

    @property
    def trophies(self):
        """
        Retrieves trophies Django objects from trophies_ids.
        """
        # Imported lazily to avoid a circular import at module load time.
        from trophies.models import ManualTrophy
        manager = ManualTrophy.objects
        return [manager.get(id=trophy_id) for trophy_id in self.trophies_ids]
class Position(models.Model):
    """A submitted position statement attributed to an MEP by id,
    together with submitter and moderation metadata."""
    # Not a ForeignKey: MEP records live in CouchDB, so only the string
    # id is stored on the relational side.
    mep_id = models.CharField(max_length=128)
    subject = models.CharField(max_length=128)
    content = models.CharField(max_length=512)
    # Who submitted it, from where and when.
    submitter_username = models.CharField(max_length=30)
    submitter_ip = models.IPAddressField()
    submit_datetime = models.DateTimeField()
    # Moderation state flags and the moderator's username.
    moderated = models.BooleanField()
    moderated_by = models.CharField(max_length=30)
    visible = models.BooleanField()
    def __json__(self):
        # Minimal serialized form exposing only the MEP id and content.
        return {"mep_id": self.mep_id, "content": self.content}
    def __unicode__(self):
        return "<Position for mep id='%s'>" % (self.mep_id)
|
nioinnovation/python-xbee | xbee/tests/test_fake.py | Python | mit | 1,321 | 0.003028 | #! /usr/bin/python
"""
test_fake.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Tests fake device objects for proper functionality.
"""
import unittest
from xbee.tests.Fake import Serial
class TestFakeSerialRead(unittest.TestCase):
    """
    Fake Serial class should work as intended to emulate reading from a serial port.
    """
    def setUp(self):
        """
        Create a fake read device for each test.
        """
        self.device = Serial()
        self.device.set_read_data("test")
    def test_read_single_byte(self):
        """
        Reading one byte at a time should work as expected.
        """
        self.assertEqual(self.device.read(), 't')
        self.assertEqual(self.device.read(), 'e')
        self.assertEqual(self.device.read(), 's')
        self.assertEqual(self.device.read(), 't')
    def test_read_multiple_bytes(self):
        """
        Reading multiple bytes at a time should work as expected.
        """
        self.assertEqual(self.device.read(3), 'tes')
        self.assertEqual(self.device.read(), 't')
    def test_write(self):
        """
        Test serial write function.
        """
        self.device.write("Hello World")
        self.assertEqual(self.device.get_data_written(), "Hello World")
|
kura/kura.io | plugins/pelican_gist/test_plugin.py | Python | mit | 2,786 | 0.000359 | # -*- coding: utf-8 -*-
"""
Test pelican-gist
=================
Test stuff in pelican_gist.
"""
from __future__ import unicode_literals
import os
from pelican_gist import plugin as gistplugin
from mock import patch
import requests.models
def test_gist_url():
    """gist_url should embed the gist id and honour an optional filename."""
    gist_id = str(3254906)
    filename = 'brew-update-notifier.sh'
    # Test without a filename
    url = gistplugin.gist_url(gist_id)
    assert gist_id in url
    # Test with filename
    url = gistplugin.gist_url(gist_id, filename)
    assert url.endswith(filename)
    assert gist_id in url
def test_script_url():
    """script_url should produce an embeddable .js URL, optionally per-file."""
    gist_id = str(3254906)
    filename = 'brew-update-notifier.sh'
    # Test without a filename
    url = gistplugin.script_url(gist_id)
    assert url.endswith('.js')
    assert gist_id in url
    # Test with filename
    url = gistplugin.script_url(gist_id, filename)
    assert url.endswith(filename)
    assert 'file={}'.format(filename) in url
    assert gist_id in url
def test_cache_filename():
    """cache_filename should produce a .cache path under the base directory."""
    path_base = '/tmp'
    gist_id = str(3254906)
    filename = 'brew-update-notifier.sh'
    # Test without a filename
    path = gistplugin.cache_filename(path_base, gist_id)
    assert path.startswith(path_base)
    assert path.endswith('.cache')
    # Test with filename
    path = gistplugin.cache_filename(path_base, gist_id, filename)
    assert path.startswith(path_base)
    assert path.endswith('.cache')
def test_set_get_cache():
    """Round-trip the gist cache, with and without a filename."""
    path_base = '/tmp'
    gist_id = str(3254906)
    filename = 'brew-update-notifier.sh'
    body = """Some gist body"""

    # Start from a clean slate: drop cache files left by earlier runs.
    for cache_path in (gistplugin.cache_filename(path_base, gist_id),
                       gistplugin.cache_filename(path_base, gist_id, filename)):
        if os.path.exists(cache_path):
            os.remove(cache_path)

    # An empty cache yields None for both variants.
    assert gistplugin.get_cache(path_base, gist_id) is None
    assert gistplugin.get_cache(path_base, gist_id, filename) is None

    # Without a filename: set, then fetch the same entry back.
    gistplugin.set_cache(path_base, gist_id, body)
    assert gistplugin.get_cache(path_base, gist_id) == body

    # With a filename: set, then fetch the same entry back.
    gistplugin.set_cache(path_base, gist_id, body, filename)
    assert gistplugin.get_cache(path_base, gist_id, filename) == body
def test_fetch_gist():
    """Ensure fetch_gist returns the response content as a string."""
    CODE_BODY = "code"
    # Stub out requests.get so no network access happens during the test.
    with patch('requests.get') as get:
        return_response = requests.models.Response()
        return_response.status_code = 200
        return_response._content= CODE_BODY.encode()
        get.return_value = return_response
        assert gistplugin.fetch_gist(1) == CODE_BODY
|
kalpana-org/kalpana | kalpana/chapters.py | Python | gpl-3.0 | 19,512 | 0.001025 | # Copyright nycz 2011-2020
# This file is part of Kalpana.
# Kalpana is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Kalpana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Kalpana. If not, see <http://www.gnu.org/licenses/>.
"""
The internal representation of the chapters.
This should not import/depend on any GUI module (such as chapteroverview).
"""
import re
import webbrowser
from dataclasses import dataclass, field
from itertools import accumulate
from typing import Callable, Optional
from libsyntyche.cli import ArgumentRules, make_command
from libsyntyche.widgets import mk_signal1
from PyQt5 import QtCore, QtGui
from .common import KalpanaObject, TextBlockState, command_callback
from .settings import ExportFormat, ExportSettings, Settings
@dataclass
class Section:
    # Description text for the section (filled in by Chapter.update_line
    # when a SECTION-state line is parsed), if any.
    desc: Optional[str] = None
    # Number of text lines belonging to this section.
    line_count: int = 0
    # Number of words in this section's lines.
    word_count: int = 0
@dataclass
class Chapter:
    # Title text from the chapter heading line (keyword and check mark
    # stripped off).
    title: Optional[str] = None
    # True when the heading line ends with a check mark (✓).
    complete: bool = False
    # Number of metadata lines (desc/time/tags) before the body text.
    metadata_line_count: int = 0
    desc: Optional[str] = None
    time: Optional[str] = None
    tags: Optional[set[str]] = None
    # Body is divided into sections; there is always at least one.
    sections: list[Section] = field(init=False)
    def __post_init__(self) -> None:
        self.sections = [Section()]
    def update_line(self, state: int, line: str, ch_str: str,
                    line_num: int) -> None:
        """Update chapter metadata from one parsed line.

        ``state`` is a TextBlockState bitmask, ``ch_str`` the chapter
        keyword, ``line_num`` the line offset used for section lookup.
        """
        if state & TextBlockState.CHAPTER:
            # Drop the keyword prefix and any trailing check mark/whitespace.
            self.title = line[len(ch_str):].strip('✓ \t')
            self.complete = line.rstrip().endswith('✓')
        elif state & TextBlockState.SECTION:
            # Cumulative line counts give the starting line of each section;
            # match line_num against them to find which section this is.
            section_lines = list(
                accumulate([self.metadata_line_count] + [s.line_count for s in self.sections])
            )
            self.sections[section_lines.index(line_num)].desc = line.rstrip()[2:-2].strip()
        elif state & TextBlockState.DESC:
            # Strip the surrounding two-character markers from the line.
            self.desc = line.rstrip()[2:-2].strip()
        elif state & TextBlockState.TIME:
            self.time = line[1:].strip()
        elif state & TextBlockState.TAGS:
            # Tags are comma-separated; the first character of each is dropped.
            self.tags = {tag.strip()[1:] for tag in line.split(',') if tag.strip()}
    @property
    def line_count(self) -> int:
        """Return how many lines long the chapter is."""
        return self.metadata_line_count + sum(s.line_count for s in self.sections)
    @property
    def word_count(self) -> int:
        # Word count of the body sections only; metadata lines not counted.
        return sum(s.word_count for s in self.sections)
    def __repr__(self) -> str:
        def cap(text: Optional[str], length: int) -> str:
            # Truncate long strings with an ellipsis for compact output.
            if text is None:
                return ''
            elif len(text) <= length:
                return repr(text)
            else:
                return repr(text[:length-1] + '…')
        return (
            f'<{self.__class__.__module__}.{self.__class__.__name__} '
            f'{"complete " if self.complete else ""}'
            f'lines={self.line_count} words={self.word_count} '
            f'title={cap(self.title, 10)} desc={cap(self.desc, 10)} '
            f'time={cap(self.time, 10)} '
            f'tags={"" if self.tags is None else len(self.tags)} '
            f'sections={len(self.sections)}>'
        )
class ChapterIndex(QtCore.QObject, KalpanaObject):
center_on_line = mk_signal1(int)
    def __init__(self, get_document: Callable[[], QtGui.QTextDocument],
                 get_cursor: Callable[[], QtGui.QTextCursor],
                 settings: Settings) -> None:
        """Register chapter-related terminal commands and reset state."""
        super().__init__()
        KalpanaObject.init(self, settings)
        # Terminal commands contributed by this component.
        self.kalpana_commands = [
            make_command(
                'word-count-chapter',
                self.count_chapter_words,
                help_text='Print the word count of a chapter',
                short_name='c',
                arg_help={
                    '': 'Print the word count of the chapter your cursor is in.',
                    '7': 'Print the word count of chapter 7.'
                },
            ),
            make_command(
                'go-to-chapter',
                self.go_to_chapter,
                help_text='Jump to a specified chapter.',
                short_name='.',
                category='movement',
                arg_help={
                    '0': 'Jump to the start of the file.',
                    '1': 'Jump to the first chapter.',
                    'n': 'Jump to the nth chapter (has to be a number).',
                    '-1': 'Jump to last chapter.',
                    '-n': 'Jump to nth to last chapter ' '(has to be a number).',
                },
            ),
            make_command(
                'go-to-next-chapter',
                self.go_to_next_chapter,
                help_text='Jump to the next chapter.',
                args=ArgumentRules.NONE,
                short_name='>',
                category='movement'
            ),
            make_command(
                'go-to-prev-chapter',
                self.go_to_prev_chapter,
                help_text='Jump to the previous chapter.',
                args=ArgumentRules.NONE,
                short_name='<',
                category='movement',
            ),
            make_command(
                'export-chapter',
                self.export_chapter,
                help_text='Export a chapter',
                args=ArgumentRules.REQUIRED,
                short_name='e',
                arg_help={'3 fmt': 'Export chapter 3 with the format "fmt".'},
            ),
        ]
        # Accessors into the editor widget (document and cursor).
        self._get_document = get_document
        self._get_cursor = get_cursor
        self.chapters: list[Chapter] = []
        self.chapter_keyword = self.settings.chapter_keyword.value
        self._block_count = -1
        # TODO: maybe actually use TextBlockState here?
        self._block_states: dict[int, int] = {}
        self._init_done = False
        # Keep the cached keyword in sync with the live setting.
        def update_setting_chapter_keyword(new_keyword: str) -> None:
            self.chapter_keyword = new_keyword
        self.settings.chapter_keyword.changed.connect(update_setting_chapter_keyword)
    def init_done(self) -> None:
        # Mark that startup has completed; _init_done gates later updates.
        self._init_done = True
    @command_callback
    def count_chapter_words(self, arg: str) -> None:
        """Print the word count of a chapter.

        With no argument, counts the chapter the cursor is in; otherwise
        ``arg`` must be a non-negative chapter number.
        """
        if not self.chapters:
            self.error('No chapters detected!')
        elif not arg:
            self.full_line_index_update()
            current_line = self._get_cursor().blockNumber()
            current_chapter = self.which_chapter(current_line)
            words = self.chapters[current_chapter].word_count
            self.log(f'Words in chapter {current_chapter}: {words}')
        elif not arg.isdecimal():
            self.error('Argument has to be a number!')
        elif int(arg) >= len(self.chapters):
            self.error('Invalid chapter!')
        else:
            # yes this is an ugly hack
            self.full_line_index_update()
            words = self.chapters[int(arg)].word_count
            self.log(f'Words in chapter {arg}: {words}')
def _go_to_chapter(self, chapter: int) -> None:
total_chapters = len(self.chapters)
if chapter not in range(-total_chapters, total_chapters):
self.error('Invalid chapter!')
else:
if chapter < 0:
chapter += total_chapters
line = self.get_chapter_line(chapter)
self.center_on_line.emit(line)
@command_callback
def go_to_chapter(self, arg: str) -> None:
"""
Go to the chapter specified in arg.
arg - The argument string entered in the terminal. Negative values
means going from the end, where -1 is the last chapter
and -2 is the second to last.
"""
if not self.chapters:
self.error('No chapters detected!')
elif not re.match(r'-?\d+$', arg):
self.error('Argument has to be a number!')
els |
kedder/soaring-coupons | coupons/migrations/0004_auto_20191107_2124.py | Python | agpl-3.0 | 580 | 0.001724 | # Generated by Django 2.2.7 on 2019-11-07 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.7 on 2019-11-07; avoid editing by hand.
    dependencies = [
        ("coupons", "0003_auto_20191027_0939"),
    ]
    operations = [
        # Coupon.id becomes a 12-character string primary key.
        migrations.AlterField(
            model_name="coupon",
            name="id",
            field=models.CharField(max_length=12, primary_key=True, serialize=False),
        ),
        # Order.notes becomes a nullable 255-character field.
        migrations.AlterField(
            model_name="order",
            name="notes",
            field=models.CharField(max_length=255, null=True),
        ),
    ]
|
schelleg/PYNQ | pynq/lib/pmod/pmod_led8.py | Python | bsd-3-clause | 5,931 | 0.007587 | # Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import Pmod_DevMode
from . import PMOD_SWCFG_DIOALL
from . import PMOD_DIO_BASEADDR
from . import PMOD_DIO_TRI_OFFSET
from . import PMOD_DIO_DATA_OFFSET
from . import PMOD_CFG_DIO_ALLOUTPUT
from . import PMOD_NUM_DIGITAL_PINS
__author__ = "Graham Schelle, Giuseppe Natale, Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
class Pmod_LED8(Pmod_DevMode):
    """This class controls a single LED on the LED8 Pmod.

    The Pmod LED8 (PB 200-163) has eight high-brightness LEDs. Each LED can be
    individually illuminated from a logic high signal.

    Attributes
    ----------
    microblaze : Pmod
        Microblaze processor instance used by this module.
    iop_switch_config :list
        Microblaze processor IO switch configuration (8 integers).
    index : int
        Index of the pin on LED8, starting from 0.

    """
    def __init__(self, mb_info, index):
        """Return a new instance of a LED object.

        Parameters
        ----------
        mb_info : dict
            A dictionary storing Microblaze information, such as the
            IP name and the reset name.
        index: int
            The index of the pin in a Pmod, starting from 0.

        """
        if index not in range(PMOD_NUM_DIGITAL_PINS):
            raise ValueError("Valid pin indexes are 0 - {}."
                             .format(PMOD_NUM_DIGITAL_PINS-1))

        super().__init__(mb_info, PMOD_SWCFG_DIOALL)
        self.index = index
        self.start()
        # Configure all DIO pins as outputs so the LEDs can be driven.
        self.write_cmd(PMOD_DIO_BASEADDR +
                       PMOD_DIO_TRI_OFFSET,
                       PMOD_CFG_DIO_ALLOUTPUT)

    def toggle(self):
        """Flip the bit of a single LED.

        The LED is turned off if it is on, and turned on if it is off.

        Returns
        -------
        None

        """
        self._set_leds_values(self._get_leds_values() ^ (0x1 << self.index))

    def on(self):
        """Turn on a single LED.

        Returns
        -------
        None

        """
        self._set_leds_values(self._get_leds_values() | (0x1 << self.index))

    def off(self):
        """Turn off a single LED.

        Returns
        -------
        None

        """
        self._set_leds_values(
            self._get_leds_values() & (0xff ^ (0x1 << self.index)))

    def write(self, value):
        """Set the LED state according to the input value

        Note
        ----
        This method does not take into account the current LED state.

        Parameters
        ----------
        value : int
            Turn on the LED if value is 1; turn it off if value is 0.

        Returns
        -------
        None

        """
        if value not in (0, 1):
            raise ValueError("LED8 can only write 0 or 1.")
        if value:
            self.on()
        else:
            self.off()

    def read(self):
        """Retrieve the LED state.

        Returns
        -------
        int
            The data (0 or 1) read out from the selected pin.

        """
        return (self._get_leds_values() >> self.index) & 0x1

    def _get_leds_values(self):
        """Read the packed 8-bit state of all LEDs from the data register.

        Note
        ----
        Should not be used directly; internal helper shared by toggle(),
        on(), off() and read().

        Returns
        -------
        int
            One bit per LED, LED 0 in the least significant bit.

        """
        return self.read_cmd(PMOD_DIO_BASEADDR + PMOD_DIO_DATA_OFFSET)

    def _set_leds_values(self, value):
        """Write the packed 8-bit state of all LEDs to the data register.

        Note
        ----
        Should not be used directly. User should rely on toggle(), on(),
        off(), write(), and read() instead.

        Parameters
        ----------
        value : int
            The state of all the LEDs encoded in one single value

        Returns
        -------
        None

        """
        self.write_cmd(PMOD_DIO_BASEADDR +
                       PMOD_DIO_DATA_OFFSET, value)
|
RudolfCardinal/crate | crate_anon/crateweb/config/urls.py | Python | gpl-3.0 | 18,507 | 0 | #!/usr/bin/env python
"""
crate_anon/crateweb/config/urls.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**crateweb Django URL configuration**
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: ``from my_app import views``
2. Add a URL to urlpatterns: ``url(r'^$', views.home, name='home')``
Class-based views
1. Add an import: ``from other_app.views import Home``
2. Add a URL to urlpatterns: ``url(r'^$', Home.as_view(), name='home')``
Including another URLconf
1. Add an import: ``from blog import urls as blog_urls``
2. Add a URL to urlpatterns: ``url(r'^blog/', include(blog_urls))``
"""
import logging
import debug_toolbar
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from crate_anon.crateweb.config.constants import (
DOWNLOAD_PRIVATESTORAGE_URL_STEM,
UrlNames,
)
from crate_anon.crateweb.core.admin import (
mgr_admin_site,
dev_admin_site,
res_admin_site,
)
import crate_anon.crateweb.core.auth_views as core_auth_views
import crate_anon.crateweb.core.views as core_views
import crate_anon.crateweb.consent.views as consent_views
import crate_anon.crateweb.research.views as research_views
import crate_anon.crateweb.userprofile.views as userprofile_views
# This is the place for one-time startup code.
# http://stackoverflow.com/questions/6791911/execute-code-when-django-starts-once-only # noqa
# So we cache things here that we don't want the user to have to wait for:
from crate_anon.crateweb.research.research_db_info import research_database_info # noqa
research_database_info.get_colinfolist()
log = logging.getLogger(__name__)
urlpatterns = [
# -------------------------------------------------------------------------
# Login, other authentication/password stuff
# -------------------------------------------------------------------------
url(r'^login/', core_auth_views.login_view, name=UrlNames.LOGIN),
url(r'^logout/', core_auth_views.logout_view, name=UrlNames.LOGOUT),
url(r'^password_change/', core_auth_views.password_change,
name=UrlNames.PASSWORD_CHANGE),
# -------------------------------------------------------------------------
# Home, About
# -------------------------------------------------------------------------
url(r'^$', core_views.home, name=UrlNames.HOME),
url(r'^about/$', core_views.about, name=UrlNames.ABOUT),
# -------------------------------------------------------------------------
# A | dmin sites
# -------------------------------------------------------------------------
# ... obfuscate: p351 of Greenfeld_2015.
url(r'^mgr_admin/', mgr_admin_site.urls),
url(r'^dev_admin/', dev_admin_site.urls),
url(r'^res_admin/', res_admin_site.urls),
# ... namespace is defined in call to AdminSite(); see core/admin.py
# -------------------------------------------------------------------------
# Main quer | y views
# -------------------------------------------------------------------------
url(r'^build_query/$',
research_views.query_build, name=UrlNames.BUILD_QUERY),
url(r'^query/$',
research_views.query_edit_select, name=UrlNames.QUERY),
url(r'^activate_query/(?P<query_id>[0-9]+)/$',
research_views.query_activate, name=UrlNames.ACTIVATE_QUERY),
url(r'^delete_query/(?P<query_id>[0-9]+)/$',
research_views.query_delete, name=UrlNames.DELETE_QUERY),
url(r'^highlight/$',
research_views.highlight_edit_select, name=UrlNames.HIGHLIGHT),
url(r'^activate_highlight/(?P<highlight_id>[0-9]+)/$',
research_views.highlight_activate, name=UrlNames.ACTIVATE_HIGHLIGHT),
url(r'^deactivate_highlight/(?P<highlight_id>[0-9]+)/$',
research_views.highlight_deactivate, name=UrlNames.DEACTIVATE_HIGHLIGHT), # noqa
url(r'^delete_highlight/(?P<highlight_id>[0-9]+)/$',
research_views.highlight_delete, name=UrlNames.DELETE_HIGHLIGHT),
url(r'^count/(?P<query_id>[0-9]+)/$',
research_views.query_count, name=UrlNames.COUNT),
url(r'^results/(?P<query_id>[0-9]+)/$',
research_views.query_results, name=UrlNames.RESULTS),
url(r'^results_recordwise/(?P<query_id>[0-9]+)/$',
research_views.query_results_recordwise,
name=UrlNames.RESULTS_RECORDWISE),
url(r'^tsv/(?P<query_id>[0-9]+)/$',
research_views.query_tsv, name=UrlNames.TSV),
url(r'^query_excel/(?P<query_id>[0-9]+)/$',
research_views.query_excel, name=UrlNames.QUERY_EXCEL),
url(r'^sitewide_queries/$',
research_views.query_add_sitewide, name=UrlNames.SITEWIDE_QUERIES),
url(r'^delete_sitewide_query/(?P<query_id>[0-9]+)/$',
research_views.sitewide_query_delete,
name=UrlNames.DELETE_SITEWIDE_QUERY),
url(r'^standard_queries/$',
research_views.show_sitewide_queries, name=UrlNames.STANDARD_QUERIES),
url(r'^process_standard_query/(?P<query_id>[0-9]+)/$',
research_views.sitewide_query_process,
name=UrlNames.PROCESS_STANDARD_QUERY),
url(r'^edit_display/(?P<query_id>[0-9]+)/$',
research_views.edit_display, name=UrlNames.EDIT_DISPLAY),
url(r'^save_display/(?P<query_id>[0-9]+)/$',
research_views.save_display, name=UrlNames.SAVE_DISPLAY),
url(r'^show_query/(?P<query_id>[0-9]+)/$',
research_views.show_query, name=UrlNames.SHOW_QUERY),
url(r'^source_information/(?P<srcdb>.+)/(?P<srctable>.+)/(?P<srcfield>.+)/(?P<srcpkfield>.+)/(?P<srcpkval>.+)/(?P<srcpkstr>.+)/$', # noqa
research_views.source_info, name=UrlNames.SRCINFO),
# -------------------------------------------------------------------------
# Patient Explorer views
# -------------------------------------------------------------------------
url(r'^pe_build/$',
research_views.pe_build, name=UrlNames.PE_BUILD),
url(r'^pe_choose/$',
research_views.pe_choose, name=UrlNames.PE_CHOOSE),
url(r'^pe_activate/(?P<pe_id>[0-9]+)/$',
research_views.pe_activate, name=UrlNames.PE_ACTIVATE),
url(r'^pe_edit/(?P<pe_id>[0-9]+)/$',
research_views.pe_edit, name=UrlNames.PE_EDIT),
url(r'^pe_delete/(?P<pe_id>[0-9]+)/$',
research_views.pe_delete, name=UrlNames.PE_DELETE),
url(r'^pe_results/(?P<pe_id>[0-9]+)/$',
research_views.pe_results, name=UrlNames.PE_RESULTS),
# url(r'^pe_tsv_zip/(?P<pe_id>[0-9]+)/$',
# research_views.patient_explorer_tsv_zip, name='pe_tsv_zip'),
url(r'^pe_excel/(?P<pe_id>[0-9]+)/$',
research_views.pe_excel, name=UrlNames.PE_EXCEL),
url(r'^pe_df_results/(?P<pe_id>[0-9]+)/$',
research_views.pe_data_finder_results, name=UrlNames.PE_DF_RESULTS),
url(r'^pe_df_excel/(?P<pe_id>[0-9]+)/$',
research_views.pe_data_finder_excel, name=UrlNames.PE_DF_EXCEL),
url(r'^pe_monster_results/(?P<pe_id>[0-9]+)/$',
research_views.pe_monster_results, name=UrlNames.PE_MONSTER_RESULTS),
# We don't offer the monster view in Excel; it'd be huge.
url(r'^pe_table_browser/(?P<pe_id>[0-9]+)/$',
research_views.pe_table_browser, name=UrlNames.PE_TABLE_BROWSER),
url(r'^pe_ |
eabdullin/nlp_mthesis | wordvectormix.py | Python | mit | 2,152 | 0.005112 | import numpy as np
import numpy.core.multiarray as ma
encoding = 'utf8'
unicode_errors = 'strict'
import scipy.spatial.distance as dist
totalcount = 0
alignedcount = 0
from math import *
def square_rooted(x):
    """Return the Euclidean (L2) norm of vector *x*, rounded to 3 decimals."""
    total = 0
    for component in x:
        total += component * component
    return round(sqrt(total), 3)
def cosine_similarity(x, y):
    """Return the cosine similarity of vectors *x* and *y*, rounded to 3 decimals.

    Extra components of the longer vector are ignored (pairwise zip), matching
    the behaviour of the generator-expression original.
    """
    dot_product = 0
    for xi, yi in zip(x, y):
        dot_product += xi * yi
    magnitude_product = square_rooted(x) * square_rooted(y)
    return round(dot_product / float(magnitude_product), 3)
def finvec(word, xwords, xweights):
    """Return the weight vector whose position in *xwords* matches *word*.

    Returns None when *word* does not occur in *xwords*.
    """
    for position, candidate in enumerate(xwords):
        if candidate == word:
            return xweights[position]
    return None
# ---------------------------------------------------------------------------
# Script body: align two word2vec vocabularies (Russian / Kazakh) and build a
# pairwise cosine-similarity matrix between the embeddings.
# ---------------------------------------------------------------------------
vru_words = None
vkk_words = None
vkk_vecs = None
vru_vecs = None
reader = Word2VecBinReader()  # NOTE(review): Word2VecBinReader is not defined or imported in the visible code
reader.readvec('E:/NlpData/rus_mixed_size(200)window(7)negative(1)cbow(0).bin')
# vru_words, vru_vecs = readvec('E:/NlpData/rus_mixed_size(200)window(7)negative(1)cbow(0).bin')
# vkk_words, vkk_vecs = readvec('E:/NlpData/kaz_mixed_size(200)window(7)negative(1)cbow(0).bin')
# NOTE(review): vru_words / vkk_words are still None at this point, so the
# len(...) calls below raise TypeError unless the commented-out readvec
# assignments above are restored.
print('len1', len(vru_words))
print('len2', len(vkk_words))
matrix_rukk = np.zeros((len(vru_words), len(vkk_words)), dtype=np.float32)
i = 0
j = 0
# Pass 1: for every Russian word also present in the Kazakh vocabulary,
# fill its row with similarities against every Kazakh vector.
for word in vru_words:
    wordvec = finvec(word, vkk_words, vkk_vecs)
    totalcount += 1
    if wordvec is not None:
        alignedcount += 1
        # NOTE(review): xrange is Python 2 while print() above is Python 3
        # style -- this script cannot run unmodified on either version.
        for wordi in xrange(len(vkk_words)):
            vec = vkk_vecs[wordi]
            # dotres = np.linalg.norm(wordvec - vec)
            dotres = cosine_similarity(wordvec,vec)
            matrix_rukk[i, j] = dotres
            j += 1
    j = 0
    i += 1
i = 0
j = 0
# Pass 2: symmetric sweep from the Kazakh side; average into pass-1 cells.
for word in vkk_words:
    wordvec = finvec(word,vru_words,vru_vecs)
    if wordvec is not None:
        for wordi in range(len(vru_words)):
            vec = vru_vecs[wordi]
            dotres = cosine_similarity(wordvec,vec)
            matrix_rukk[j,i] = (matrix_rukk[j,i] + dotres)/2
            j += 1
    j = 0
    i += 1
print('total: {}, aligned; {}'.format(totalcount, alignedcount))
print('saving')
# Dump the matrix as tab-separated text, one row per line.
# NOTE(review): the write loops index matrix_rukk[i, j] with i ranging over
# shape[1] and j over shape[0]; for a non-square matrix this is transposed
# and will raise IndexError -- verify intended orientation.
with open('rukkmatrix.txt', 'w') as w:
    for i in range(matrix_rukk.shape[1]):
        for j in range(matrix_rukk.shape[0]):
            w.write('{0}\t'.format(matrix_rukk[i, j]))
        w.write('\n')
|
ashh87/caffeine | setup.py | Python | gpl-3.0 | 1,042 | 0.013436 | #!/usr/bin/env python
from distutils.core import setup
import os
import sys
def main():
    """Run distutils ``setup()`` for the caffeine package.

    Walks the bundled ``share`` directory and mirrors every file into a
    ``data_files`` entry so it installs under the target prefix.  Icon theme
    index files are excluded so installation does not clobber the user's
    system icon theme.
    """
    share_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "share")

    data_files = []
    # don't trash the users system icons!!  (a set gives O(1) membership)
    black_list = {'index.theme', 'index.theme~'}
    for path, _dirs, files in os.walk(share_path):
        # distutils expects (target_dir, [source_files]) pairs; the old
        # tuple((...)) wrapper around an already-built tuple was redundant.
        target_dir = path.replace(share_path, "share", 1)
        payload = [os.path.join(path, name)
                   for name in files if name not in black_list]
        data_files.append((target_dir, payload))

    setup(name="caffeine",
          version="2.4.1",
          description="""A status bar application able to temporarily prevent
          the activation of both the screensaver and the "sleep" powersaving
          mode.""",
          author="The Caffeine Developers",
          author_email="bnsmith@gmail.com",
          url="https://launchpad.net/caffeine",
          packages=["caffeine"],
          data_files=data_files,
          scripts=[os.path.join("bin", "caffeine")]
          )
if __name__ == "__main__":
main()
|
operepo/ope | laptop_credential/winsys/misc.py | Python | mit | 1,023 | 0.004888 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
import time
import uuid
import win32api
import win32con
import win32gui
import win32console
import win32gui
from winsys import core, registry
def set_console_title(text):
    """Set the console window title to *text*; return the previous title."""
    previous = win32console.GetConsoleTitle()
    win32console.SetConsoleTitle(text)
    return previous
def console_hwnd():
    """Return the window handle (HWND) of this process's console.

    The console is temporarily renamed to a unique title so FindWindow can
    identify it unambiguously; the original title is always restored.
    """
    unique_title = uuid.uuid1().hex
    previous_title = set_console_title(unique_title)
    try:
        # Give Windows a moment to propagate the title change.
        time.sleep(0.05)
        return win32gui.FindWindow(None, unique_title)
    finally:
        set_console_title(previous_title)
def set_environment(**kwargs):
    """Persist each keyword as a per-user environment variable and broadcast
    the change so running applications can pick it up.

    NOTE(review): "HKC" is handed to the project's registry wrapper --
    presumably an alias for HKEY_CURRENT_USER; confirm in registry.py.
    """
    root = registry.registry("HKC")
    env = root.Environment
    for label, value in kwargs.iteritems():  # Python 2 dict iteration
        env.set_value(label, value)
    # Notify all top-level windows that the environment changed; abort per
    # window after 2000 ms so one hung application cannot block us.
    win32gui.SendMessageTimeout(
        win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE,
        0, "Environment",
        win32con.SMTO_ABORTIFHUNG, 2000
    )
def get_environment():
    """Return the per-user environment block (HKCU\\Environment) as a dict
    of name -> value pairs."""
    return dict(
        registry.registry(r"HKCU\Environment").itervalues()
    )
|
rwl/openpowersystem | dynamics/dynamics/generators/gen_sync.py | Python | agpl-3.0 | 1,631 | 0.003679 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
# <<< imports
# @generated
from dynamics.dynamics.rotating_machine import RotatingMachine
from google.appengine.ext import db
# >>> imports
class GenSync(RotatingMachine):
    """ Synchronous generator model. A single standard synchronous model is defined for the CIM, with several variations indicated by the 'model type' attribute. This model can be used for all types of synchronous machines (salient pole, solid iron rotor, etc.).
    """
    # NOTE: the "# <<< ... # >>>" marker pairs below delimit sections owned
    # by the code generator ("@generated"); presumably regeneration rewrites
    # the content between the markers, so avoid hand-edits inside them.
    # <<< gen_sync.attributes
    # @generated
    # >>> gen_sync.attributes

    # <<< gen_sync.references
    # @generated
    # >>> gen_sync.references

    # <<< gen_sync.operations
    # @generated
    # >>> gen_sync.operations
# EOF -------------------------------------------------------------------------
|
class LazyProxy(object):
    """Transparent lazy-instantiation proxy.

    Holds the dotted module path, class name and constructor arguments of a
    target object; the real instance is only created on first attribute
    access, after which all attribute lookups are forwarded to it.
    """

    def __init__(self, original_module, original_class, init_args):
        # original_module: dotted module path containing the target class.
        # original_class:  class name to look up inside that module.
        # init_args:       positional constructor args; entries starting
        #                  with '@' are resolved via the DI container.
        self._original_module = original_module
        self._original_class = original_class
        self._original_init_args = init_args
        self._instance = None

    def __getattr__(self, name):
        # Called only for attributes not found on the proxy itself, so the
        # private _original_* fields are served normally.
        if self._instance is None:
            self.__init_class__()
        return getattr(self._instance, name)

    def __init_class__(self):
        """Import the target module and build the real instance."""
        import importlib
        module = importlib.import_module(self._original_module)
        class_ = getattr(module, self._original_class)
        if self._original_init_args is not None:
            # Resolve '@feature' placeholders through the DI container.
            # NOTE(review): this mutates self._original_init_args in place,
            # so resolution happens at most once per placeholder.
            for index, arg in enumerate(self._original_init_args):
                if arg[:1] == '@':
                    from resources.lib.di.requiredfeature import RequiredFeature
                    self._original_init_args[index] = RequiredFeature(arg[1:]).request()
            import inspect
            # Map the positional args onto the constructor's parameter
            # names (dropping 'self') so they can be passed as keywords.
            # NOTE(review): inspect.getargspec is removed in Python 3.11+.
            args = inspect.getargspec(class_.__init__)[0]
            if args[0] == 'self':
                args.pop(0)
            argument_dict = dict(zip(args, self._original_init_args))
            self._instance = class_(**argument_dict)
        else:
            self._instance = class_()
|
GentlemanBrewing/ADCLibraries-MCP3424 | IOPi/tutorial1.py | Python | mit | 697 | 0 | #!/usr/bin/python3
"""
================================================
ABElectronics IO Pi 32-Channel Port Expander - Tutorial 1
Version 1.0 Created 29/02/2015
Requires python 3 smbus to be installed
run with: python3 tutorial1.py
================================================
This example uses the write_pin and write_port meth | ods to switch pin 1 on
and off on the IO Pi.
"""
from ABE_helpers import ABEHelpers
from ABE_IoPi import IoPi
import time |
# Acquire the Raspberry Pi's SMBus/I2C bus via the ABE helper.
i2c_helper = ABEHelpers()
i2c_bus = i2c_helper.get_smbus()
bus = IoPi(i2c_bus, 0x21)  # IO Pi expander at I2C address 0x21

bus.set_port_direction(0, 0x00)  # port 0: all eight pins as outputs
bus.write_port(0, 0x00)  # start with every pin driven low

# Toggle pin 1 forever: one second high, one second low.
while True:
    bus.write_pin(1, 1)
    time.sleep(1)
    bus.write_pin(1, 0)
    time.sleep(1)
|
# Demographic distributions used to sample simulated candidates.
# Each identity category maps to (code, percentage-of-population) pairs.
identity = {
    # https://www.census.gov/prod/cen2010/briefs/c2010br-03.pdf
    'sex': [('M',49.2),('F',50.8)],
    # https://en.wikipedia.org/wiki/Race_and_ethnicity_in_the_United_States
    # 'O' and 'U' are the two race buckets used throughout (see `iq` below).
    'race': [('O',72.4),('U',12.6)]
    }

iq = {
    # Class: (mu, sigma) -- parameters of a normal distribution.
    # http://www.iqcomparisonsite.com/sexdifferences.aspx
    'M': (103.08, 14.54),
    'F': (101.41, 13.55),

    # https://commons.wikimedia.org/wiki/File:WAIS-IV_FSIQ_Scores_by_Race_and_Ethnicity.png
    'O': (103.21, 13.77),
    'U': (88.67, 13.68),

    # http://isteve.blogspot.com/2005/12/do-black-women-have-higher-iqs-than.html
    # See the URL above for the provenance of the figures. As heritable measures of IQ,
    # they are probably mostly garbage. But they provide a representative basis for a
    # certain kind of "scientific" view of the world. And they were the only ones
    # I came across that broke down mu and sigma values by sex and race.
    # Combined sex+race buckets (e.g. 'UF' = race 'U', sex 'F'):
    'UF': (90.8, 13.58),
    'UM': (88.4, 13.30),
    'OF': (103.6, 13.30),
    'OM': (102.7, 14.75)
    }
|
championswimmer/minor-1-piBot-videostream | control-codes/python/ultrasonic.py | Python | gpl-2.0 | 1,305 | 0.006897 | #!/usr/bin/python
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|-|S|p|y|.|c|o|.|u|k|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# ultrasonic_1.py
# Measure distance using an ultrasonic module
#
# Author : Matt Hawkins
# Date : 09/01/2013
# Import required Python libraries
# Import required Python libraries
import time
import RPi.GPIO as GPIO

# Use BCM GPIO references
# instead of physical pin numbers
GPIO.setmode(GPIO.BCM)

# Define GPIO to use on Pi
GPIO_TRIGGER = 23
GPIO_ECHO = 24

print "Ultrasonic Measurement"

# Set pins as output and input
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)  # Trigger
GPIO.setup(GPIO_ECHO,GPIO.IN)      # Echo

# Set trigger to False (Low)
GPIO.output(GPIO_TRIGGER, False)

# Allow module to settle
time.sleep(0.5)

# Send 10us pulse to trigger
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
start = time.time()

# Busy-wait for the echo pulse: `start` is the moment the echo line rises,
# `stop` the moment it falls again.
# NOTE(review): if the sensor never raises/drops the echo line these loops
# spin forever, and `stop` is undefined when the second loop body never
# runs -- consider adding a timeout.
while GPIO.input(GPIO_ECHO)==0:
  start = time.time()

while GPIO.input(GPIO_ECHO)==1:
  stop = time.time()

# Calculate pulse length
elapsed = stop-start

# Distance pulse travelled in that time is time
# multiplied by the speed of sound (cm/s)
distance = elapsed * 34000

# That was the distance there and back so halve the value
distance = distance / 2

print "Distance : %.1f" % distance

# Reset GPIO settings
GPIO.cleanup()
|
starbops/OpenADM | core/src/pox_modules/uipusher.py | Python | gpl-2.0 | 6,837 | 0.041685 | import logging
from pymongo import MongoClient
import json
from bson import json_util
import time
import datetime
logger = logging.getLogger(__name__)
class UIPusher:
    """POX component feeding the OmniUI web front end (Python 2).

    Registers one controller-event handler that aggregates per-flow
    byte/packet counters into MongoDB collections bucketed by 10-minute
    timestamps, plus two URL handlers serving the topology (JSONP) and
    TSV traffic statistics.
    """

    def __init__(self,core,parm):
        # register event handler
        core.registerEventHandler("controlleradapter", self.controllerHandler)
        # register websocket api
        core.registerURLApi("info/topology", self.topologyHandler)
        core.registerURLApi("stat", self.statisticHandler)
        # save core for ipc use
        self.core = core
        # Collection names per aggregation level; slot 0 is rewritten to
        # the current day's hourly collection ("hourlyYYYY_MM_DD").
        self.intervalList=['hourly','daily','weekly','monthly','annually']
        self.intervalList[0] = 'hourly'+str(datetime.datetime.today().strftime("%Y_%m_%d"))
        self.enable = True if parm['enable'] == "true" or parm['enable'] == "True" else False
        self.limit = int(parm['queryinterval'])  # flush to DB every N events
        self.count = 0                           # events since last flush
        self.prevTime = time.time()              # last 10-minute bucket seen
        self.cache = {}     # flow-key -> [delta bytes, delta pkts, key, duration]
        self.diff = {}      # flow-key -> last raw [bytes, pkts, duration] snapshot
        self.tmpcache = {}
        if self.enable:
            try:
                self.client = MongoClient(parm['dbip'],int(parm['dbport']))
                self.db = self.client[parm['db']]
                self.db.authenticate(parm['user'],parm['password'])
            # NOTE(review): bare except hides the real connection error.
            except:
                print "database connection failed"

    def topologyHandler(self,request):
        """Serve the current topology as a JSONP callback payload."""
        # return JSONP format
        result = self.core.invokeIPC("periodicInquiry")
        return "omniui(%s);" % result

    def controllerHandler(self,event):
        """Accumulate per-flow counter deltas from a controller event.

        Flow counters are cumulative, so self.diff keeps the previous raw
        snapshot and only the delta is added to self.cache.  A shrinking
        duration means the flow was re-installed (counters reset), in which
        case the raw counters are taken as the delta.
        """
        if self.enable:
            #compute timestamp
            now = time.time()
            # Seconds past the last 10-minute boundary, e.g.
            #12:35:39 -> 12:30:00
            reduntTime = int(datetime.datetime.fromtimestamp(now).strftime('%M'))%10*60 + int(datetime.datetime.fromtimestamp(now).strftime('%S'))
            data = json.loads(event)
            self.count = self.count + 1
            # Crossing into a new 10-minute bucket: flush the old one first.
            if int(now-reduntTime) != self.prevTime:
                self.writeToDB()
            for node in data['nodes']:
                for flow in node['flows']:
                    # The flow minus its volatile counters becomes the
                    # identity key for cache/diff bookkeeping.
                    key=flow.copy()
                    key.pop("counterByte",None)
                    key.pop("counterPacket",None)
                    key.pop("duration",None)
                    # Flatten the action list into a single "TYPE:value"
                    # string so the key stays hashable.
                    for dic in key['actions']:
                        if dic['type'] == "STRIP_VLAN":
                            key['actions'] = "".join(["{0}".format(dic['type'])])
                        else:
                            key['actions'] = "".join(["{0}:{1}".format(dic['type'],dic['value'])])
                    key['dpid'] = str(node['dpid'])
                    key['date'] = int(now - reduntTime)  # 10-minute bucket timestamp
                    # Empty action list was never flattened above -- drop it.
                    if isinstance(key['actions'],list):
                        del key['actions']
                    hashkey = frozenset(key.items())
                    if hashkey in self.cache:
                        # Duration went backwards => flow re-installed;
                        # counters restarted from zero.
                        if self.diff[hashkey][2] > flow['duration']:
                            tmpCB = flow['counterByte']
                            tmpCP = flow['counterPacket']
                        else:
                            tmpCB = flow['counterByte'] - self.diff[hashkey][0]
                            tmpCP = flow['counterPacket'] - self.diff[hashkey][1]
                        self.cache[hashkey][0] += tmpCB
                        self.cache[hashkey][1] += tmpCP
                        self.cache[hashkey][2] = key
                        self.cache[hashkey][3] = flow['duration']
                        self.diff[hashkey][0] = flow['counterByte']
                        self.diff[hashkey][1] = flow['counterPacket']
                        self.diff[hashkey][2] = flow['duration']
                    else:
                        # First sighting: record zero delta and snapshot
                        # the raw counters for the next round.
                        self.cache[hashkey] = [0,0,key,flow['duration']]
                        self.diff[hashkey] = [flow['counterByte'],flow['counterPacket'],flow['duration']]
            self.prevTime = int(now-reduntTime)
            if self.count >= self.limit and len(self.cache) > 0:
                self.writeToDB()
        self.event = event

    def writeToDB(self):
        """Flush the cached counter deltas into the current hourly collection,
        merging with any existing document for the same flow key."""
        self.count = 0
        #access database
        self.tmpcache = self.cache
        self.cache={}
        key={}
        if len(self.tmpcache)==0:
            return
        ##update db name
        prevTime = datetime.datetime.fromtimestamp(self.prevTime).strftime("%Y_%m_%d")
        self.intervalList[0] = 'hourly'+str(prevTime)
        print self.intervalList[0]
        for hashkey in self.tmpcache:
            key = self.tmpcache[hashkey][2]
            exist = self.db[self.intervalList[0]].find_one(key)
            if exist is not None:
                # Same flow already stored for this bucket: add the deltas
                # onto the stored totals (reuse its _id so save() updates).
                key['_id'] = exist['_id']
                key['counterByte'] = self.tmpcache[hashkey][0] + exist['counterByte']
                key['counterPacket'] = self.tmpcache[hashkey][1] + exist['counterPacket']
            else:
                key['counterByte'] = self.tmpcache[hashkey][0]
                key['counterPacket'] = self.tmpcache[hashkey][1]
            key['duration'] = self.tmpcache[hashkey][3]
            self.db[self.intervalList[0]].save(key)

    def statisticHandler(self,data):
        """Answer a statistics query with a TSV table (rows = timestamps,
        one column per requested flow pattern)."""
        if self.enable == False:
            return "Time\t1\n"
        #declare variable
        multiGroup = {}     # date -> [[byte total, column index], ...]
        output = "Time"     # TSV header row under construction
        count = 1           # next column index (1-based)
        # for hourly query: interval code 0 walks one collection per day.
        if int(data['interval']) ==0:
            fromTime = datetime.datetime.strptime(data['from'],"%Y-%m-%d")
            toTime = datetime.datetime.strptime(data['to'],"%Y-%m-%d")
            oneday = datetime.timedelta(days=1)
            #1/26~1/27 means 1/26 00:00 to 1/27 23:59, so plus one day to toTime
            toTime = toTime + oneday
            keys=[]
            for pattern in data['pattern']:
                output+="\t"+str(count)
                count = count +1
                # Only non-empty pattern fields participate in the match.
                key={}
                for field in pattern:
                    if pattern[field] !='':
                        key[field] = pattern[field]
                currentTime = fromTime
                group= {}
                while currentTime != toTime:
                    tableName = "hourly"+currentTime.strftime("%Y_%m_%d")
                    currentTime = currentTime + oneday
                    # Sum counterByte per bucket timestamp.
                    for entry in self.db[tableName].find(key):
                        if entry['date'] in group:
                            group[entry['date']] = group[entry['date']] + entry["counterByte"]
                        else:
                            group[entry['date']] = entry["counterByte"]
                for date in group:
                    if date in multiGroup:
                        multiGroup[date].append([group[date],count-1])
                    else:
                        multiGroup[date]=[[group[date],count-1]]
        # for weekly,monthly... a single pre-aggregated collection is used.
        else:
            #translate datetime to timestamp
            fromTime = int(time.mktime(time.strptime(data['from'],'%Y-%m-%d')))
            #1/26~1/27 means 1/26 00:00 to 1/27 23:59, so plus one day to toTime
            toTime = int(time.mktime(time.strptime(data['to'],'%Y-%m-%d')))+86400
            #use the interval code to obtain collection name
            interval = self.intervalList[ int(data['interval'])]
            #flow pattern,only match non-empty field
            for pattern in data['pattern']:
                output+="\t"+str(count)
                count = count +1
                group= {}
                key = {}
                for field in pattern:
                    if pattern[field] !='':
                        key[field] = pattern[field]
                key['date'] = {'$gte':fromTime,'$lt':toTime}
                #use date to group data
                for entry in self.db[interval].find(key):
                    if entry['date'] in group:
                        group[entry['date']] = group[entry['date']] + entry["counterByte"]
                    else:
                        group[entry['date']] = entry["counterByte"]
                #add group to multiGroup
                for date in group:
                    if date in multiGroup:
                        multiGroup[date].append([group[date],count-1])
                    else:
                        multiGroup[date]=[[group[date],count-1]]
        #tsv format
        # NOTE(review): in the hourly branch the grouping keys are datetime
        # objects, so fromtimestamp(date) below would raise TypeError --
        # verify the stored 'date' field type for hourly collections.
        output+="\n"
        tmp=""
        for date in sorted(multiGroup.iterkeys()):
            tmp = datetime.datetime.fromtimestamp(date).strftime('%Y-%m-%d %H:%M')
            #insert zero for no-traffic flow
            size = count
            tmpIndex = 0
            for index in range(1,size):
                if multiGroup[date][tmpIndex][1] == index:
                    tmp+=("\t"+str(multiGroup[date][tmpIndex][0]))
                    tmpIndex+=1
                else:
                    pass
                    tmp+=("\t0")
                if tmpIndex >= len(multiGroup[date]):
                    tmpIndex = 0
            output+=tmp+"\n"
        return output
|
aurarad/auroracoin | qa/rpc-tests/listsinceblock.py | Python | mit | 2,579 | 0.002714 | #!/usr/bin/env python3
# Copyright (c) 2017 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import AuroracoinTestFramework
from test_framework.util import assert_equal
class ListSinceBlockTest (AuroracoinTestFramework):
    # Regression test: `listsinceblock` must still report a transaction that
    # confirmed on a branch later abandoned by a reorg (see run_test).
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True   # start every run from a fresh chain
        self.num_nodes = 4

    def run_test (self):
        '''
        `listsinceblock` did not behave correctly when handed a block that was
        no longer in the main chain:

             ab0
          /       \
        aa1 [tx0]   bb1
         |           |
        aa2         bb2
         |           |
        aa3         bb3
                     |
                    bb4

        Consider a client that has only seen block `aa3` above. It asks the node
        to `listsinceblock aa3`. But at some point prior the main chain switched
        to the bb chain.

        Previously: listsinceblock would find height=4 for block aa3 and compare
        this to height=5 for the tip of the chain (bb4). It would then return
        results restricted to bb3-bb4.

        Now: listsinceblock finds the fork at ab0 and returns results in the
        range bb1-bb4.

        This test only checks that [tx0] is present.
        '''
        assert_equal(self.is_network_split, False)
        # Mine 101 blocks so node 2 has one mature, spendable coinbase.
        self.nodes[2].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        # Split network into two
        self.split_network()
        assert_equal(self.is_network_split, True)

        # send to nodes[0] from nodes[2]
        senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)

        # generate on both sides: the 6-block side will lose the reorg to
        # the 7-block side when the partitions rejoin.
        lastblockhash = self.nodes[1].generate(6)[5]
        self.nodes[2].generate(7)
        print('lastblockhash=%s' % (lastblockhash))

        self.sync_all()
        self.join_network()

        # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
        lsbres = self.nodes[0].listsinceblock(lastblockhash)
        found = False
        for tx in lsbres['transactions']:
            if tx['txid'] == senttx:
                found = True
                break
        assert_equal(found, True)
if __name__ == '__main__':
ListSinceBlockTest().main()
|
rr-/dotfiles | cfg/alacritty/__main__.py | Python | mit | 231 | 0 | from libdotfiles.packages import try_install
from libdotfiles.util import HOME_DIR, PKG_DIR, copy_ | file
try_install("alacritty")
copy_file(
PKG_DIR / "alacritty.yml",
HOME_DIR / ".config" / "alacri | tty" / "alacritty.yml",
)
|
plotly/python-api | packages/python/plotly/plotly/validators/parcoords/line/colorbar/_showticklabels.py | Python | mit | 523 | 0 | import _pl | otly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for `parcoords.line.colorbar.showticklabels`."""

    def __init__(
        self,
        plotly_name="showticklabels",
        parent_name="parcoords.line.colorbar",
        **kwargs
    ):
        # Fill in validator defaults without clobbering caller overrides,
        # then hand everything to the base BooleanValidator.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("role", "style")
        super(ShowticklabelsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
tschalch/pyTray | src/lib/reportlab/graphics/testdrawings.py | Python | bsd-3-clause | 9,682 | 0.025098 | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/testdrawings.py
__version__=''' $Id $ '''
"""This contains a number of routines to generate test drawings
for reportlab/graphics. For now they are contrived, but we will expand them
to try and trip up any parser. Feel free to add more.
"""
from reportlab.graphics.shapes import *
from reportlab.lib import colors
def getDrawing1():
    """Hello World, on a rectangular background"""
    drawing = Drawing(400, 200)
    background = Rect(50, 50, 300, 100, fillColor=colors.yellow)
    greeting = String(180, 100, 'Hello World', fillColor=colors.red)
    drawing.add(background)
    drawing.add(greeting)
    return drawing
def getDrawing2():
    """This demonstrates the basic shapes.  There are
    no groups or references.  Each solid shape should have
    a purple fill."""
    drawing = Drawing(400, 200)  # , fillColor=colors.purple)

    drawing.add(Line(10, 10, 390, 190))
    # Three filled circles along the middle of the drawing.
    for centre_x in (100, 200, 300):
        drawing.add(Circle(centre_x, 100, 20, fillColor=colors.purple))
    drawing.add(Wedge(330, 100, 40, -10, 40, fillColor=colors.purple))
    zigzag = [120, 10, 130, 20, 140, 10, 150, 20, 160, 10,
              170, 20, 180, 10, 190, 20, 200, 10]
    drawing.add(PolyLine(zigzag))
    drawing.add(Polygon([300, 20, 350, 20, 390, 80, 300, 75, 330, 40]))
    drawing.add(Ellipse(50, 150, 40, 20))
    drawing.add(Rect(120, 150, 60, 30,
                     strokeWidth=10,
                     strokeColor=colors.red,
                     fillColor=colors.yellow))  # square corners
    drawing.add(Rect(220, 150, 60, 30, 10, 10))  # round corners
    drawing.add(String(10, 50, 'Basic Shapes', fillColor=colors.black))
    return drawing
##def getDrawing2():
## """This drawing uses groups. Each group has two circles and a comment.
## The line style is set at group level and should be red for the left,
## bvlue for the right."""
## D = Drawing(400, 200)
##
## Group1 = Group()
##
## Group1.add(String(50, 50, 'Group 1', fillColor=colors.black))
## Group1.add(Circle(75,100,25))
## Group1.add(Circle(125,100,25))
## D.add(Group1)
##
## Group2 = Group(
## String(250, 50, 'Group 2', fillColor=colors.black),
## Circle(275,100,25),
## Circle(325,100,25)#,
##def getDrawing2():
## """This drawing uses groups. Each group has two circles and a comment.
## The line style is set at group level and should be red for the left,
## bvlue for the right."""
## D = Drawing(400, 200)
##
## Group1 = Group()
##
## Group1.add(String(50, 50, 'Group 1', fillColor=colors.black))
## Group1.add(Circle(75,100,25))
## Group1.add(Circle(125,100,25))
## D.add(Group1)
##
## Group2 = Group(
## String(250, 50, 'Group 2', fillColor=colors.black),
## Circle(275,100,25),
## Circle(325,100,25)#,
##
## #group attributes
## #strokeColor=colors.blue
## )
## D.add(Group2)
## return D
##
##
##def getDrawing3():
## """This uses a named reference object. The house is a 'subroutine'
## the basic brick colored walls are defined, but the roof and window
## color are undefined and may be set by the container."""
##
## D = Drawing(400, 200, fill=colors.bisque)
##
##
## House = Group(
## Rect(2,20,36,30, fill=colors.bisque), #walls
## Polygon([0,20,40,20,20,5]), #roof
## Rect(8, 38, 8, 12), #door
## Rect(25, 38, 8, 7), #window
## Rect(8, 25, 8, 7), #window
## Rect(25, 25, 8, 7) #window
##
## )
## D.addDef('MyHouse', House)
##
## # one row all the same color
## D.add(String(20, 40, 'British Street...',fill=colors.black))
## for i in range(6):
## x = i * 50
## D.add(NamedReference('MyHouse',
## House,
## transform=translate(x, 40),
## fill = colors.brown
## )
## )
##
## # now do a row all different
## D.add(String(20, 120, 'Mediterranean Street...',fill=colors.black))
## x = 0
## for color in (colors.blue, colors.yellow, colors.orange,
## colors.red, colors.green, colors.chartreuse):
## D.add(NamedReference('MyHouse',
## House,
## transform=translate(x,120),
## fill = color,
## )
## )
## x = x + 50
## #..by | popular demand, the mayor gets a big one at the end
## D.add(NamedReference('MyHouse',
## House,
## transform=mmult(translate(x,110), scale(1.2,1.2)),
## fill = color,
## )
## )
##
##
## return D
##
##def getDrawing4():
## """This tests that attributes are 'unset' correctly when
## one | steps back out of a drawing node. All the circles are part of a
## group setting the line color to blue; the second circle explicitly
## sets it to red. Ideally, the third circle should go back to blue."""
## D = Drawing(400, 200)
##
##
## G = Group(
## Circle(100,100,20),
## Circle(200,100,20, stroke=colors.blue),
## Circle(300,100,20),
## stroke=colors.red,
## stroke_width=3,
## fill=colors.aqua
## )
## D.add(G)
##
##
## D.add(String(10,50, 'Stack Unwinding - should be red, blue, red'))
##
## return D
##
##
##def getDrawing5():
## """This Rotates Coordinate Axes"""
## D = Drawing(400, 200)
##
##
##
## Axis = Group(
## Line(0,0,100,0), #x axis
## Line(0,0,0,50), # y axis
## Line(0,10,10,10), #ticks on y axis
## Line(0,20,10,20),
## Line(0,30,10,30),
## Line(0,40,10,40),
## Line(10,0,10,10), #ticks on x axis
## Line(20,0,20,10),
## Line(30,0,30,10),
## Line(40,0,40,10),
## Line(50,0,50,10),
## Line(60,0,60,10),
## Line(70,0,70,10),
## Line(80,0,80,10),
## Line(90,0,90,10),
## String(20, 35, 'Axes', fill=colors.black)
## )
##
## D.addDef('Axes', Axis)
##
## D.add(NamedReference('Axis', Axis,
## transform=translate(10,10)))
## D.add(NamedReference('Axis', Axis,
## transform=mmult(translate(150,10),rotate(15)))
## )
## return D
##
##def getDrawing6():
## """This Rotates Text"""
## D = Drawing(400, 300, fill=colors.black)
##
## xform = translate(200,150)
## C = (colors.black,colors.red,colors.green,colors.blue,colors.brown,colors.gray, colors.pink,
## colors.lavender,colors.lime, colors.mediumblue, colors.magenta, colors.limegreen)
##
## for i in range(12):
## D.add(String(0, 0, ' - - Rotated Text', fill=C[i%len(C)], transform=mmult(xform, rotate(30*i))))
##
## return D
##
##def getDrawing7():
## """This defines and tests a simple UserNode0 (the trailing zero denotes
## an experimental method which is not part of the supported API yet).
## Each of the four charts is a subclass of UserNode which generates a random
## series when rendered."""
##
## class MyUserNode(UserNode0):
## import whrandom, math
##
##
## def provideNode(self, sender):
## """draw a simple chart that changes everytime it's drawn"""
## # print "here's a random number %s" % self.whrandom.random()
## #print "MyUserNode.provideNode being called by %s" % sender
## g = Group()
## #g._state = self._state # this is naughty
## PingoNode.__init__(g, self._state) # is this less naughty ?
## w = 80.0
## h = 50.0
## g.add(Rect(0,0, w, h, stroke=colors.black))
## N = 10.0
## x,y = (0,h)
## dx = w/N
## for ii |
tsunam/dotd_parser | routes.production-mode.py | Python | mit | 339 | 0.014749 | #
# Currently we don't | do multi-lingual support
#
# See router.example.py for language customization hints
#
# To INSTALL: copy this file to youe base web2py installation:
# ./web2py/routes.py
# Then restart your server deployment ( web2py, apache w/mod_wsgi )
#
routers = dict(
    # Route bare-domain requests straight into the dotd_parser application.
    BASE = dict(default_application='dotd_parser'),
)
|
davidbarkhuizen/dart | OHLCVAnalysis.py | Python | mit | 1,209 | 0.07196 | from histogram import Histogram
class OHLCVAnalysis:
    """Summary report for an OHLCV price series over a date window.

    Scans the samples whose date lies strictly between ``start`` and ``end``
    and records: the span of in-window dates, the trading-day count, the
    maximum of the highs, the minimum of the lows, and a Histogram of the
    closing prices.
    """

    def __init__(self, dates, open, high, low, close, vol, start, end):
        # Accept the window bounds in either order.
        if start > end:
            (start, end) = (end, start)
        self.report_log = []
        highest = None        # max of `high` seen inside the window
        highest_date = None
        lowest = None         # min of `low` seen inside the window
        lowest_date = None
        # Bug fix: these were previously seeded with dates[0] even when
        # dates[0] fell outside the requested window, so the reported span
        # could include out-of-window dates.  They now start as None and are
        # only set from in-window samples.
        seq_start = None
        seq_end = None
        series = []           # in-window closing prices for the histogram
        n = 0
        for i in range(len(dates)):
            d = dates[i]
            if start < d < end:
                series.append(close[i])
                if seq_start is None or d < seq_start:
                    seq_start = d
                if seq_end is None or d > seq_end:
                    seq_end = d
                n += 1
                h = high[i]
                # `is None` replaces the old `== None` comparisons; the
                # locals no longer shadow the max()/min() builtins.
                if highest is None or h > highest:
                    highest = h
                    highest_date = d
                l = low[i]
                if lowest is None or l < lowest:
                    lowest = l
                    lowest_date = d
        self.report_log.append('%s - %s' % (seq_start, seq_end))
        self.report_log.append('%d trading days' % n)
        self.report_log.append('Max = %s - %s' % (str(highest), highest_date))
        self.report_log.append('Min = %s - %s' % (str(lowest), lowest_date))
        hist = Histogram(series)
        for line in hist.report():
            self.report_log.append(line)

    def report(self):
        """Return the accumulated report lines."""
        return self.report_log
|
lock8/django-rest-framework-jwt-refresh-token | tests/urls.py | Python | mit | 248 | 0 | from django.con | f.urls import url
from refreshtoken.routers import router
from refreshtoken.views import Del | egateJSONWebToken
urlpatterns = router.urls + [
url(r'^delegate/$', DelegateJSONWebToken.as_view(),
name='delegate-tokens'),
]
|
tejesh95/Zubio.in | zubio/allauth/socialaccount/providers/github/tests.py | Python | mit | 1,978 | 0.000506 | from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import GitHubProvider
class GitHubTests(create_oauth2_tests(registry.by_id(GitHubProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"type":"User",
"organizations_url":"https://api.github.com/users/pennersr/orgs",
"gists_url":"https://api.github.com/users/pennersr/gists{/gist_id}",
"received_events_url":"https://api.github.com/users/pennersr/received_events",
"gravatar_id":"8639768262b8484f6a3380f8db2efa5b",
"followers":16,
"blog":"http://www.intenct.info",
"avatar_url":"https://secure.gravatar.com/avatar/8639768262b8484f6a3380f8db2efa5b?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png",
"login":"pennersr",
"created_at":"2010-02-10T12:50:51Z",
"company":"IntenCT",
"subscriptions_url":"https://api.github.com/users/penner | sr/subscriptions",
"public_repos":14,
"hireable":false,
"url":"https://api.github.com/users/pennersr",
"public_gists":0,
"starred_url":"https://api.github.com/users/pennersr/starred{/owner}{/repo}",
| "html_url":"https://github.com/pennersr",
"location":"The Netherlands",
"bio":null,
"name":"Raymond Penners",
"repos_url":"https://api.github.com/users/pennersr/repos",
"followers_url":"https://api.github.com/users/pennersr/followers",
"id":201022,
"following":0,
"email":"raymond.penners@intenct.nl",
"events_url":"https://api.github.com/users/pennersr/events{/privacy}",
"following_url":"https://api.github.com/users/pennersr/following"
}""")
|
catchchaos/Movie-Recommender-GA- | Modular_UI.py | Python | mit | 7,347 | 0.008575 | from movielens import *
import numpy as np
import pickle
import random
import os.path
NO_OF_RECOMMENDATIONS = 10
def load_from_dataset(utility_matrix):
user = []
item = []
ratings = []
d = Dataset()
d.load_users("data/u.user", user)
d.load_items("data/u.item", item)
d.load_ratings("data/u.base", ratings)
movie_genre = []
for movie in item:
movie_genre.append([movie.unknown, movie.action, movie.adventure, movie.animation, movie.childrens, movie.comedy,
movie.crime, movie.documentary, movie.drama, movie.fantasy, movie.film_noir, movie.horror,
movie.musical, movie.mystery, movie.romance, movie.sci_fi, movie.thriller, movie.war, movie.western])
movie_genre = np.array(movie_genre)
# Find the average rating for each user and stores it in the user's object
for i in range(0, len(user)):
x = utility_matrix[i]
user[i].avg_r = sum(a for a in x if a > 0) / sum(a > 0 for a in x)
return user, item, movie_genre, ratings
def pcs(x, y, ut, user):
num = 0
den1 = 0
den2 = 0
A = ut[x - 1]
B = ut[y - 1]
num = sum((a - user[x - 1].avg_r) * (b - user[y - 1].avg_r) for a, b in zip(A, B) if a > 0 and b > 0)
den1 = sum((a - user[x - 1].avg_r) ** 2 for a in A if a > 0)
den2 = sum((b - user[y - 1].avg_r) ** 2 for b in B if b > 0)
den = (den1 ** 0.5) * (den2 ** 0.5)
if den == 0:
return 0
else:
return num / den
def pop_movies(movies, movie_clusters):
top_movies = []
with open("avg_rating.pkl", "r") as fp:
avg_rating = pickle.load(fp)
for movie, rating, cluster in zip(movies, avg_rating, movie_clusters.labels_):
# Threshold for average is low due to sparse dataset
if rating > 0.1:
top_movies.append(movie)
return random.sample(top_movies, 100)
def update_recommendations(stored_rating, new_rating, alpha):
# Update the average rating to include information abour latest preference
# Alpha determines relative importance
updated_rating = (alpha-1) * stored_rating + alpha * new_rating
return updated_rating
def recomm_main(utility_matrix, avg_ratings, demographics, pcs_matrix):
user, item, movie_genre, ratings = load_from_dataset(utility_matrix)
n_users = len(user)
n_items = len(item)
with open("cluster.pkl", "r") as fp:
cluster = pickle.load(fp)
ask = pop_movies(item, cluster)
print "Please rate the following movies (1-5):\nFill in 0 if you have not seen it:"
k=0
for movie in ask:
print movie.title + ": "
a = int(input())
if a==0:
continue
if avg_ratings[cluster.labels_[movie.id - 1]] != 0:
# avg_ratings[cluster.labels_[movie.id - 1]] = (avg_ratings[cluster.labels_[movie.id - 1]] + a) / 2
avg_ratings[cluster.labels_[movie.id - 1]] = update_recommendations(avg_ratings[cluster.labels_[movie.id - 1]], a, 0.5)
else:
avg_ratings[cluster.labels_[movie.id - 1]] = a
k = k+1
if k == 10:
break
utility_new = np.vstack((utility_matrix, avg_ratings))
user.append(demographics)
print "Finding users which have similar preferences."
for i in range(0, n_users + 1):
| if i != 943:
pcs_matrix[i] = pcs(944, i + 1, utility_new, user)
user_index = []
for i in user:
user_index.append(i.id - 1)
user_index = user_index[:943]
user_index = np.array(user_index)
top_similar = [x for (y, x) in sorted(zip(pcs_matrix, user_index), key=lambda pair: pair[0], reverse=True)]
top_5 = top_similar[:5]
top_5_cluster = []
for i in range(0, 5):
maxi = 0
maxe = 0
for j in range(0, 19):
if ma | xe < utility_matrix[top_5[i]][j]:
maxe = utility_matrix[top_5[i]][j]
maxi = j
top_5_cluster.append(maxi)
#print top_5_cluster
res = {}
for i in range(len(top_5_cluster)):
if top_5_cluster[i] not in res.keys():
res[top_5_cluster[i]] = len(top_5_cluster) - i
else:
res[top_5_cluster[i]] += len(top_5_cluster) - i
top_cluster = res.keys()[0]
movies_in_top_cluster = []
for i in item:
if cluster.labels_[i.id - 1] == top_cluster:
movies_in_top_cluster.append(i)
movie_dict = {movie.id: [0, 0, 0, 0, 0] for movie in movies_in_top_cluster}
for movie in movies_in_top_cluster:
for j in ratings:
if j.user_id in top_5 and j.item_id == movie.id:
movie_dict[movie.id][j.rating - 1] += 1
recommended_movies = None
movie_sums = []
for movie in movie_dict:
total = 0
for i, j in zip(range(0, NO_OF_RECOMMENDATIONS), movie_dict[movie]):
total += i * j
movie_sums.append(total)
recommended_movies = sorted(zip(movie_dict.keys(), movie_sums), key=lambda x: x[1], reverse=True)# print recommended_movies[:5]
final_recommendations=[]
for i in item:
if i.id in [recommended_movies[k][0] for k in range(NO_OF_RECOMMENDATIONS)]:
final_recommendations.append(i)
print i.title
return (avg_ratings, demographics, pcs_matrix, final_recommendations)
def rate_recomm(utility_matrix, avg_ratings, demographics, pcs_matrix, recommendations):
with open("cluster.pkl", "r") as fp:
cluster = pickle.load(fp)
user, item, movie_genre, ratings = load_from_dataset(utility_matrix)
user.append(demographics)
n_users = len(user)
n_items = len(item)
c = 0
for i in recommendations:
print i.title
r = input("Enter your rating\n")
if r>3.5:
c+=1
avg_ratings[cluster.labels_[i.id - 1]] = update_recommendations(avg_rating[cluster.labels_[i.id - 1]], r, 0.5)
print "Precision of predictions : ",c/5.0
for i in range(0, n_users):
if i!=943:
utility_new = np.vstack((utility_matrix, avg_ratings))
pcs_matrix[i] = pcs(944, i + 1, utility_new, user)
return avg_ratings, pcs_matrix
def UI_main():
username=raw_input("\nEnter username\n")
if os.path.exists(username + ".pkl"):
print "Old User"
flag=True
o=pickle.load(open(username+".pkl", "rb"))
avg_ratings=o.avg_ratings
demographics=o.demographics
pcs_matrix=o.pcs
recommendations=o.recommendations
else:
print "New User"
flag=False
avg_ratings = np.zeros(19)
demographics= User(944, 21, 'M', 'student', 575025)
pcs_matrix = np.zeros(943)
recommendations=[]
utility_matrix = pickle.load(open("utility_matrix.pkl", "rb"))
while(True):
ch=input("\nEnter:\n 1 for getting recommendations\n 2 for rating past recommendations\n 3 to exit\n")
if int(ch)==1:
avg_ratings, demographics, pcs_matrix, recommendations = recomm_main(utility_matrix, avg_ratings, demographics,pcs_matrix)
elif int(ch)==2:
avg_ratings, pcs_matrix = rate_recomm(utility_matrix, avg_ratings, demographics, pcs_matrix, recommendations)
else:
break
pickle.dump(NewUser(username, avg_ratings, demographics, pcs_matrix, recommendations), open(username+".pkl", "wb"))
if __name__=="__main__":
UI_main()
|
priscillaboyd/SPaT_Prediction | src/neural_network/RNN_LSTM.py | Python | apache-2.0 | 4,376 | 0.0016 | # Copyright 2017 Priscilla Boyd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The RNN_LSTM module implements a recurrent neural network using LSTM.
"""
import csv
import numpy as np
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from tools.Utils import current_dt, get_latest_dataset_folder, get_latest_dataset
def split_test_training(data_path, sequence_length):
"""
Split data between test and training examples.
:param string data_path: Location of CSV-formatted data
:param int sequence_length: Sequence length (temporal window) to be used
:return: Training examples (X_train), training targets (y_train), test examples (X_test) and test targets (y_test)
:rtype: dataframe, dataframe, dataframe, dataframe
"""
# logic for loading the CSV, using 'result' (2nd) column as basis for prediction
with open(data_path) as f:
record = csv.reader(f, delimiter=",")
next(record, None)
spat = []
nb_of_values = 0
for line in record:
spat.append(float(line[2]))
nb_of_values += 1
# break file into chunks based on sequence length
result = []
for index in range(len(spat) - sequence_length):
result.append(spat[index: index + sequence_length])
result = np.array(result)
# divide set into 20% for test, 80% for training
row = int(round(0.8 * result.shape[0]))
train = result[:row, :]
np.random.shuffle(train)
X_train = train[:, :-1]
y_train = train[:, -1]
X_test = result[row:, :-1]
y_test = result[row:, -1]
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
return [X_train, y_train, X_test, y_test]
def build_model():
"""
Build the learning RNN model using Keras (Sequential) module.
:return: RNN model
:rtype: History object
"""
model = Sequential()
# declare the sizes of the layers (1d input and output)
layers = [1, 50, 100, 1]
# first hidden layer, using linear activation (not specified)
model.add(LSTM(layers[1], input_shape=(None, layers[0]), return_sequences=True))
model.add(Dropout(0.2))
# second hidden layer
model.add(LSTM(layers[2], return_sequences=False))
model.add(Dropout(0.2))
# third hidden layer
model.add(Dense(layers[3]))
mode | l.add(Activation("linear"))
# compile using MSE as loss function for regressio | n, RMSPROP as optimiser
model.compile(loss="mse", optimizer="RMSProp", metrics=['accuracy'])
# return the model
return model
def run_rnn(file):
# define model params
"""
Run the process to train/test a recurrent neural network using LSTM using a given dataset file.
:param string file: Location of CSV-formatted dataset file
:return: Model with expected (test) targets and associated scores
:rtype: object, dataframe, object
"""
num_epochs = 2
sequence_length = 20
# grab train and test data from CSV
X_train, y_train, X_test, y_test = split_test_training(file, sequence_length)
print(X_train)
# build model
model = build_model()
model.fit(X_train, y_train, epochs=num_epochs, batch_size=64, validation_split=0.2)
# predict
predict = model.predict(X_test)
predict = np.reshape(predict, predict.size)
# evaluate
score = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: ", score[1]*100, "%")
# save model to h5 file (same folder as data)
model_location_folder = get_latest_dataset_folder()
model.save(model_location_folder + '/RNN_' + current_dt + '.h5')
return model, y_test, predict
|
alberts/check_mk | web/plugins/icons/inventory.py | Python | gpl-2.0 | 1,927 | 0.007265 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public Licens | e as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU G | eneral Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
def paint_icon_inventory(what, row, tags, customer_vars):
if (what == "host" or row.get("service_check_command","").startswith("check_mk_active-cmk_inv!")) \
and inventory.has_inventory(row["host_name"]):
return link_to_view(html.render_icon('inv', _("Show Hardware/Software-Inventory of this host")),
row, 'inv_host' )
multisite_icons.append({
'host_columns': [ "name" ],
'paint': paint_icon_inventory,
})
|
eunchong/build | third_party/buildbot_8_4p1/buildbot/db/migrate/versions/007_add_object_tables.py | Python | bsd-3-clause | 1,552 | 0.002577 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, | Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
def upgrade(migrate_engine):
metadata = sa.MetaData()
metadata.bind = migrate_engine
objects = sa.Table("objects", metadata,
sa.Column("id", sa.Integer, primary_key=True),
sa.Column('name', sa.String(128), nullable=False),
sa.Column('class_name', sa.String(128), nullable=False),
sa.UniqueConstraint('name', 'cla | ss_name', name='object_identity'),
)
objects.create()
object_state = sa.Table("object_state", metadata,
sa.Column("objectid", sa.Integer, sa.ForeignKey('objects.id'),
nullable=False),
sa.Column("name", sa.String(length=256), nullable=False),
sa.Column("value_json", sa.Text, nullable=False),
sa.UniqueConstraint('objectid', 'name', name='name_per_object'),
)
object_state.create()
|
the-dalee/gnome-2048 | core/model/commands/engine.py | Python | mit | 1,148 | 0.002613 | import i18n
class EngineCommand(object):
type = "Engine"
description = "Do nothing with engine"
def execute(self):
pass
def undo(self):
pass
class AddScore(EngineCommand):
def __init__(self, engine, score):
self.engine = engine
self.last_score = engine.score
self.new_score = engine.score + score
self.description = _("Add %(score)i points" %
{
"score": score,
})
def execute(self):
self.engine.score = self.new_score
def undo(self):
self.engine.score = self.last_score
class SetState(EngineCommand):
last_state = None
new_state = None
engine = None
def __init__(self, engine, new_state):
self.engine = engine
self.last_state = engine.state
self.new_state = new_state
self.description = _("Set game state to %(state)s" %
{
"state": str(new_state),
})
def execute(self):
self. | engine.state = self.new_state
| def undo(self):
self.engine.state = self.last_state
|
DaveTCode/CreatureRogue | battle_test.py | Python | mit | 2,852 | 0.004558 | """
Script used to test the battle state functionality. Allows the user to pick
a pair of creatures and then uses the game loop to fight them.
Will probably crash when the battle concludes because the rest of the game
will not be set up at that point.
"""
import argparse
import CreatureRogue.creature_creator as creature_creator
import CreatureRogue.settings as settings
from CreatureRogue.battle_ai import RandomMoveAi
from CreatureRogue.game import Game
from CreatureRogue.models.battle_creature import BattleCreature
from CreatureRogue.models.battle_data import BattleData
from CreatureRogue.models.creature import Creature
from CreatureRogue.models.game_data import GameData
from CreatureRogue.models.move import Move
from CreatureRogue.models.player import Player
from CreatureRogue.states.battle_state import BattleState
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("attacking_creature_id", type=int)
parser.add_argument("attacking_creature_level", type=int)
parser.add_argument("defending_creature_id", type=int)
parser.add_argument("defending_creature_level", type=int)
args = parser.parse_args()
game = Game(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT, settings.TITLE, settings.FONT)
game.load_static_data()
game.init()
game_data = GameData()
attacking_species = game.static_game_data.species[args.attacking_creature_id]
print("You've selected a: Lv.{0} {1}".format(args.attacking_creature_level, attacking_species))
defending_species = game.static_game_data.species[args.defending_creature_id]
print | ("You've selected a: Lv.{0} {1}".format(args.defending_creature_level, defending_species))
attacking_moves = [Move(move_data) for move_data | in attacking_species.move_data_at_level(args.attacking_creature_level)]
wild_creature = BattleCreature(creature_creator.create_wild_creature(game.static_game_data, defending_species, args.defending_creature_level), game.static_game_data)
game_data.battle_data = BattleData(game_data,
BattleCreature(Creature(attacking_species, args.attacking_creature_level, None, None, creature_creator.random_stat_values(game.static_game_data.stats, 1, 15), creature_creator.zero_stat_values(game.static_game_data.stats), False, attacking_moves, 1), game.static_game_data),
RandomMoveAi(wild_creature),
wild_creature=wild_creature)
game_data.player = Player("Test", game.static_game_data, None, 0, 0)
game_data.player.pokeballs[game.static_game_data.pokeballs[1]] = 3
game.game_data = game_data
game.state = BattleState(game, game.game_data, game.battle_renderer, game.level_up_renderer, game.catch_graphic_renderer)
game.game_loop()
|
DeanSherwin/django-dynamic-scraper | tests/basic/scheduler_test.py | Python | bsd-3-clause | 2,244 | 0.006684 | #Stage 2 Update (Python 3)
import datetime
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from dynamic_scraper.utils.scheduler import Scheduler
class SchedulerTest(TestCase):
def test_config_wrong_def(self):
conf_dict_str = '\
"MIN_TIME" ---- 15,\n\
"MAX_TIME": 10080,\n\
"INITIAL_NEXT_ACTION_FACTOR": 10,\n\
"ZERO_ACTIONS_FACTOR_CHANGE": 20,\n\
"FACTOR_CHANGE_FACTOR": 1.3,\n'
self.assertRaises(ImproperlyConfigured, Scheduler, conf_dict_str)
def test_config_missing_value_max_time(self):
conf_dict_str = '\
"MIN_TIME": 15,\n\
"INITIAL_NEXT_ACTION_FACTOR": 10,\n\
"ZERO_ACTIONS_FACTOR_CHANGE": 20,\n\
"FACTOR_CHANGE_FACTOR": 1.3,\n'
self.assertRaises(ImproperlyConfigured, Scheduler, conf_dict_str)
def test_calc_next_action_time(self):
| conf_dict_str = '\
"MIN_TIME": 15,\n\
"MAX_TIME": 10080,\n\
"INITIAL_NEXT_ACTION_FACTOR": 10,\n\
"ZERO_ACTIONS_FACTOR_CHANGE": 20,\n\
"FACTOR_CHANGE_FACTOR": 1.3,\n'
sched = Scheduler(conf_dict_str)
# Successful action, not-initialized next action factor
result = sched.calc_next_action_time(True, None, 0)
self.assertEqual(result, (datetime.timedelta(minutes=115), 7.692, 0))
# Successful action
result = sched | .calc_next_action_time(True, 13, 9)
self.assertEqual(result, (datetime.timedelta(minutes=150), 10, 0))
# Successful action, new time delta under min time
result = sched.calc_next_action_time(True, 1, 9)
self.assertEqual(result, (datetime.timedelta(minutes=15), 0.769, 0))
# Successful action, not-initialized next action factor
result = sched.calc_next_action_time(False, None, 0)
self.assertEqual(result, (datetime.timedelta(minutes=150), 10, 1))
# Unsuccessful action, no new action factor
result = sched.calc_next_action_time(False, 10, 18)
self.assertEqual(result, (datetime.timedelta(minutes=150), 10, 19))
# Unsuccessful action, new action factor
result = sched.calc_next_action_time(False, 10, 19)
self.assertEqual(result, (datetime.timedelta(minutes=195), 13, 0)) |
coderanger/stratosphere | test/test_template.py | Python | apache-2.0 | 8,830 | 0.001133 | #
# Author:: Noah Kantrowitz <noah@coderanger.net>
#
# Copyright 2014, Balanced, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import stratosphere
from stratosphere import Ref, FindInMap
class TestTemplate(object):
def d(self, cls):
return json.loads(cls().to_json())
def test_empty(self):
class MyTemplate(stratosphere.Template):
pass
assert self.d(MyTemplate) == {'Resources': {}}
def test_parameter(self):
class MyTemplate(stratosphere.Template):
def param_Foo(self):
return {'Type': 'String'}
assert self.d(MyTemplate) == {
'Resources': {},
'Parameters': {
'Foo': {'Type': 'String'},
},
}
def test_parameter_description(self):
class MyTemplate(stratosphere.Template):
def param_Foo(self):
"""I am a teapot."""
return {'Type': 'String'}
assert self.d(MyTemplate) == {
'Resources': {},
'Parameters': {
'Foo': {
'Type': 'String',
'Description': 'I am a teapot.',
},
},
}
def test_subnet(self):
class MyTemplate(stratosphere.Template):
def subnet(self):
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.0.0.0/16'}
assert self.d(MyTemplate) == {
'Resources': {
'Subnet': {
'Properties': {
'CidrBlock': '10.0.0.0/16',
'VpcId': {'Ref': 'vpc-teapot'},
},
'Type': 'AWS::EC2::Subnet',
},
},
}
def test_subnet_description(self):
class MyTemplate(stratosphere.Template):
def subnet(self):
"""I am a teapot."""
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.0.0.0 | /16'}
assert self.d(MyTemplate) == {
'Resources': {
'Subnet': {
'Properties': {
'CidrBlock': '10.0.0.0/16',
'VpcId': {'Ref': 'vpc-teapot'},
'Tags': [{'Key': 'Description' | , 'Value': 'I am a teapot.'}],
},
'Type': 'AWS::EC2::Subnet',
},
},
}
def test_subnet_tags(self):
class MyTemplate(stratosphere.Template):
def subnet(self):
"""I am a teapot."""
return {
'VpcId': Ref('vpc-teapot'),
'CidrBlock': '10.0.0.0/16',
'Tags': [{'Key': 'Foo', 'Value': 'Bar'}],
}
assert self.d(MyTemplate) == {
'Resources': {
'Subnet': {
'Properties': {
'CidrBlock': '10.0.0.0/16',
'VpcId': {'Ref': 'vpc-teapot'},
'Tags': [
{'Key': 'Foo', 'Value': 'Bar'},
{'Key': 'Description', 'Value': 'I am a teapot.'},
],
},
'Type': 'AWS::EC2::Subnet',
},
},
}
def test_subnet_tags_description(self):
class MyTemplate(stratosphere.Template):
def subnet(self):
"""I am a teapot."""
return {
'VpcId': Ref('vpc-teapot'),
'CidrBlock': '10.0.0.0/16',
'Tags': [
{'Key': 'Foo', 'Value': 'Bar'},
{'Key': 'Description', 'Value': 'Short and stout'},
],
}
assert self.d(MyTemplate) == {
'Resources': {
'Subnet': {
'Properties': {
'CidrBlock': '10.0.0.0/16',
'VpcId': {'Ref': 'vpc-teapot'},
'Tags': [
{'Key': 'Foo', 'Value': 'Bar'},
{'Key': 'Description', 'Value': 'Short and stout'},
],
},
'Type': 'AWS::EC2::Subnet',
},
},
}
def test_subnet_name(self):
class MyTemplate(stratosphere.Template):
def subnet_MySubnet(self):
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.0.0.0/16'}
assert self.d(MyTemplate) == {
'Resources': {
'MySubnet': {
'Properties': {
'CidrBlock': '10.0.0.0/16',
'VpcId': {'Ref': 'vpc-teapot'},
},
'Type': 'AWS::EC2::Subnet',
},
},
}
def test_subnet_object(self):
class MyTemplate(stratosphere.Template):
def subnet(self):
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.0.0.0/16'}
assert type(MyTemplate().subnet().to_object()) == stratosphere.ec2.Subnet
def test_mapping(self):
class MyTemplate(stratosphere.Template):
def map_MyMap(self):
return {
'Region': {'Cidr': '10.0.0.0/16'}
}
def subnet(self):
return {
'VpcId': Ref('vpc-teapot'),
'CidrBlock': FindInMap(self.map_MyMap(), 'Region', 'Cidr')}
assert self.d(MyTemplate) == {
'Mappings': {
'MyMap': {
'Region': {
'Cidr': '10.0.0.0/16',
},
},
},
'Resources': {
'Subnet': {
'Properties': {
'CidrBlock': {'Fn::FindInMap': ['MyMap', 'Region', 'Cidr']},
'VpcId': {'Ref': 'vpc-teapot'},
},
'Type': 'AWS::EC2::Subnet',
},
},
}
def test_depends_on(self):
class MyTemplate(stratosphere.Template):
def subnet_One(self):
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.0.0.0/16'}
def subnet_Two(self):
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.0.0.0/16', 'DependsOn': self.subnet_One()}
assert self.d(MyTemplate)['Resources']['Two']['DependsOn'] == 'One'
def test_subclass(self):
class MyTemplate(stratosphere.Template):
def subnet(self):
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.0.0.0/16'}
def subnet_Two(self):
return {'VpcId': Ref('vpc-teapot'), 'CidrBlock': '10.2.0.0/16'}
class MyTemplate2(MyTemplate):
def subnet(self):
subnet = super(MyTemplate2, self).subnet()
subnet['CidrBlock'] = '10.1.0.0/16'
return subnet
assert self.d(MyTemplate2) == {
'Resources': {
'Subnet': {
'Properties': {
'CidrBlock': '10.1.0.0/16',
'VpcId': {'Ref': 'vpc-teapot'},
},
'Type': 'AWS::EC2::Subnet',
},
'Two': {
'Properties': {
'CidrBlock': '10.2.0.0/16',
'VpcId': {'Ref': 'vpc-teapot'},
},
'Type': 'AWS::EC2::Subnet',
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.