repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class: Python) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k) | middle (string, 3–512) | suffix (string, 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
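# Each row below follows the header schema: repo_name | path | language | license |
# size | score | prefix | middle | suffix. A minimal sketch (an assumption: only the
# three code columns may themselves contain " | ") for recovering the six metadata
# fields from one raw row:
def parse_row_metadata(raw_row):
    repo_name, path, language, license_, size, score, code = raw_row.split(" | ", 6)
    return {"repo_name": repo_name, "path": path, "language": language,
            "license": license_, "size": int(size.replace(",", "")),
            "score": float(score)}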
mkasa/taw | taw/sshlike.py | Python | mit | 5,073 | 0.006308 | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import absolute_import
import os, click
import subprocess
from taw.util import *
from taw.taw import *
# commands/subcommands
# ==============
# SSH COMMAND
# ==============
@taw.command("ssh")
@click.argument('hostname', metavar='<host name>')
@click.argument('sshargs', nargs=-1)
@pass_global_parameters
def ssh_cmd(params, hostname, sshargs):
""" do SSH to a specified host """
ssh_like_call(params, 'ssh', hostname, sshargs)
# ==============
# MOSH COMMAND
# ==============
@taw.command("mosh")
@click.argument('hostname', metavar='<host name>')
@click.argument('moshargs', nargs=-1)
@pass_global_parameters
def mosh_cmd(params, hostname, moshargs):
""" do MOSH to a specified host """
ssh_like_call(params, 'mosh', hostname, moshargs)
# ==============
# RSSH COMMAND
# ==============
@taw.command("rssh")
@click.argument('hostname', metavar='<host name>')
@click.argument('rsshargs', nargs=-1)
@pass_global_parameters
def rssh_cmd(params, hostname, rsshargs):
""" do rSSH to a specified host """
ssh_like_call(params, 'rssh', hostname, rsshargs)
# ==============
# RSYNC COMMAND
# ==============
@taw.command("rsync")
@click.argument('hostname', metavar='<host name>')
@click.argument('rsyncargs', nargs=-1)
@pass_global_parameters
def rsync_cmd(params, hostname, rsyncargs):
    """ do rsync to a specified host """
    ssh_like_call(params, 'rsync', hostname, rsyncargs)
# ==============
# SCP COMMAND
# ==============
@taw.command("scp")
@click.argument('src', nargs=-1)
@click.argument('dst', nargs=1)
@click.option('-i', 'key_file_path', help='SSH key file')
@click.option('-p', 'preserve_flag', is_flag=True, help='preserve attrs')
@click.option('-B', 'batch_flag', is_flag=True, help='batch mode')
@click.option('-C', 'compression_flag', is_flag=True, help='enable compression')
@click.option('-c', 'cypher', help='cypher type')
@click.option('-l', 'limit_bandwidth', help='bandwidth limit in Kb/s')
@click.option('-P', 'port', default=None, type=int, help='port number')
@click.option('-r', 'recursive_flag', is_flag=True, help='recursive copy')
@click.option('-q', 'quiet_flag', is_flag=True, help='quiet mode')
# TODO: support -v/-vv/-vvv, -o, -F (, -1, -2, -3, -4, -6 at lower priority)
@pass_global_parameters
def scp_cmd(params, src, dst, key_file_path, preserve_flag, batch_flag, compression_flag, cypher, limit_bandwidth, port, recursive_flag, quiet_flag):
""" do scp to/from a specified host """
args = ['scp']
if preserve_flag: args.append('-p')
if batch_flag: args.append('-B')
if compression_flag: args.append('-C')
if cypher: args += ['-c', cypher]
if limit_bandwidth: args += ['-l', limit_bandwidth]
    if port: args += ['-P', str(port)]  # subprocess args must be strings
if recursive_flag: args.append('-r')
if quiet_flag: args.append('-q')
(dest_user, dest_host, dest_path) = decompose_rpath(dst)
copying_local_to_remote = dest_host is not None
if copying_local_to_remote:
        instance = convert_host_name_to_instance(dest_host)
if instance.public_ip_address is None: error_exit("The instance has no public IP address")
dest_host = instance.public_ip_address
if dest_user == '_': dest_user = os.environ['USER']
if dest_user is None: dest_user = get_root_like_user_from_instance(instance)
if key_file_path is None: key_file_path = os.path.join(os.path.expanduser("~/.ssh"), instance.key_name + ".pem")
if os.path.exists(key_file_path):
args += ['-i', key_file_path]
else:
print_info("Key file '%s' does not exist.\nThe default keys might be used" % key_file_path)
args += list(src) + ["%s@%s:%s" % (dest_user, dest_host, dest_path)]
else:
# copying remote to local
sources_arr = [decompose_rpath(i) for i in src]
for host in sources_arr[1:]:
if host[1] != sources_arr[0][1]: error_exit("Multiple source hosts are not supported.")
if host[0] != sources_arr[0][0]: error_exit("Multiple source users are not supported.")
instance = convert_host_name_to_instance(sources_arr[0][1])
if instance.public_ip_address is None: error_exit("The instance has no public IP address")
src_host = instance.public_ip_address
src_user = sources_arr[0][0]
if src_user == '_': src_user = os.environ['USER']
if src_user is None: src_user = get_root_like_user_from_instance(instance)
if key_file_path is None: key_file_path = os.path.join(os.path.expanduser("~/.ssh"), instance.key_name + ".pem")
if os.path.exists(key_file_path):
args += ['-i', key_file_path]
else:
print_info("Key file '%s' does not exist.\nThe default keys might be used" % key_file_path)
args += ["%s@%s:%s" % (src_user, src_host, x[2]) for x in sources_arr]
args.append(dst)
if params.aws_dryrun:
print(" ".join(args))
return
try:
subprocess.check_call(args)
    except subprocess.CalledProcessError:
        pass  # scp prints its own error messages; exit quietly on failure
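# Example invocations (sketch; "web1" is a hypothetical host name that taw
# resolves to the instance's public IP, falling back to ~/.ssh/<key_name>.pem
# when -i is omitted):
#   taw scp -r ./build web1:/srv/app
#   taw scp -P 2222 web1:/var/log/app.log .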
|
bailey-ann/diamanda-fork | diamandas/myghtyboard/views.py | Python | bsd-3-clause | 6,485 | 0.039938 | #!/usr/bin/python
# Diamanda Application Set
# myghtyboard forum
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.views.generic.list_detail import object_list
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from diamandas.myghtyboard.models import *
from diamandas.myghtyboard.context import forum as forumContext
from diamandas.myghtyboard.forms import *
def category_list(request):
"""
show all categories and their topics
"""
categories = Category.objects.all().order_by('order')
for c in categories:
#{% if forum.mods %}<u>{% trans "Moderators" %}</u>:
# {% for i in forum.mods %}
# {{ i.username }}{% if not forloop.last %},{% endif %}
# {% endfor %}
#{% endif %}
#forum = c.forum_set.all().order_by('order')
#forums = []
#for f in forum:
# f.mods = f.moderators.all()
    #    forums.append(f)
c.forums = c.forum_set.all().order_by('order')
return render_to_response(
'myghtyboard/category_list.html',
{'categories': categories},
context_instance=RequestContext(request, forumContext(request)))
def topic_list(request, forum_id, pagination_id=1):
"""
list of topics in a forum
* forum_id - id of a Forum record
"""
prefixes = False
prefixes_list = False
if request.POST:
prefixes = request.POST.copy()
prefixes = prefixes.getlist("prefix[]")
prefixes = Prefix.objects.filter(id__in=prefixes)
try:
if prefixes and len(prefixes) > 0:
tops = TopicPrefix.objects.all().values('topic').distinct()
for i in prefixes:
tops = tops.filter(prefix=i)
topics_ids = []
for tid in tops:
topics_ids.append(tid['topic'])
topics = Topic.objects.order_by('-is_global', '-is_sticky', '-modification_date').filter(Q(forum=forum_id) | Q(is_global='1'))
topics = topics.filter(id__in=topics_ids)
else:
topics = Topic.objects.order_by('-is_global', '-is_sticky', '-modification_date').filter(Q(forum=forum_id) | Q(is_global='1'))
count = topics.count()
count = count/10
cnt = [1]
i = 1
while i <= count:
i = i+1
cnt.append(i)
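        # "cnt" ends up as [1, 2, ..., count+1]: one entry per results page,
        # matching the paginate_by=10 used in object_list() below.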
forum = Forum.objects.get(id=forum_id)
name = forum.name
except:
return redirect_by_template(request, reverse('diamandas.myghtyboard.views.category_list', kwargs={}), _('There is no such forum. Please go back to the forum list.'))
if request.user.is_authenticated():
chck = Post.objects.filter(author_system=request.user).count()
else:
chck = 0
if chck < 5:
form = AddTopicWithCaptchaForm()
else:
form = AddTopicForm()
pr = False
if forum.use_prefixes:
p = Prefix.objects.filter(forums=forum)
if len(p) > 0:
pr = []
for i in p:
pr.append(i)
request.forum_id = forum_id
return object_list(
request,
topics,
paginate_by = 10,
allow_empty = True,
page = pagination_id,
context_processors = [forumContext],
extra_context = {'forum': forum, 'form': form, 'current_user': unicode(request.user), 'pr': pr, 'prefixes': prefixes, 'cnt': cnt},
template_name = 'myghtyboard/topics_list.html')
@login_required
def my_topic_list(request, show_user=False):
"""
list my topics
* show_user - if not set will show current user topics
"""
if not show_user:
show_user = unicode(request.user)
topics = Topic.objects.order_by('-modification_date').filter(author=show_user)[:50]
name = _('User Topics')
return render_to_response(
'myghtyboard/mytopics_list.html',
{'topics': topics, 'name': name},
context_instance=RequestContext(request, forumContext(request)))
@login_required
def last_topic_list(request):
"""
list last active topics
"""
topics = Topic.objects.order_by('-modification_date')[:50]
for i in topics:
pmax = i.post_set.all().count()/10
pmaxten = i.post_set.all().count()%10
if pmaxten != 0:
i.pagination_max = pmax+1
else:
i.pagination_max = pmax
name = _('Last Active Topics')
return render_to_response(
'myghtyboard/mytopics_list.html',
{'topics': topics, 'name': name},
context_instance=RequestContext(request, forumContext(request)))
@login_required
def my_posttopic_list(request, show_user=False):
"""
list topics with my posts
* show_user - if not set will show current user topics
"""
if not show_user:
show_user = unicode(request.user)
try:
topics = Post.objects.order_by('-date').filter(author=show_user).values('topic').distinct()[:50]
posts = []
for i in topics:
posts.append(int(i['topic']))
topics = Topic.objects.order_by('-modification_date').filter(id__in=posts)
for i in topics:
pmax = i.post_set.all().count()/10
pmaxten = i.post_set.all().count()%10
if pmaxten != 0:
i.pagination_max = pmax+1
else:
i.pagination_max = pmax
name = _('User Posts in Latest Topics')
except:
return render_to_response('myghtyboard/mytopics_list.html', {}, context_instance=RequestContext(request, forumContext(request)))
return render_to_response(
'myghtyboard/mytopics_list.html',
{'topics': topics, 'name': name},
context_instance=RequestContext(request, forumContext(request)))
def post_list(request, topic_id, pagination_id):
"""
list post in topic with a generic pagination view
* topic_id - id of a Topic entry
"""
try:
topic = Topic.objects.get(id=topic_id)
except Topic.DoesNotExist:
return HttpResponseRedirect(reverse('diamandas.myghtyboard.views.category_list', kwargs={}))
if topic.is_locked:
opened = False
else:
opened = True
if topic.author_anonymous == False:
try:
topic.author_system
except:
topic.author_anonymous = True
topic.author_system = None
topic.save()
if topic.author_anonymous == False and request.user == topic.author_system:
is_author = True
else:
is_author = False
forum = topic.forum
request.forum_id = forum.id
form = AddPostForm()
posts = topic.post_set.all().order_by('date')
count = posts.count()
count = count/10
cnt = [1]
i = 1
while i <= count:
i = i+1
cnt.append(i)
return object_list(
request,
posts,
paginate_by = 10,
page = pagination_id,
context_processors = [forumContext],
extra_context = {
'opened': opened,
'is_author': is_author,
'topic': topic,
'cnt': cnt,
'forum_id': forum.id,
'form': form,
'forum_name': forum,
'current_user': unicode(request.user)},
template_name = 'myghtyboard/post_list.html')
|
drincruz/luigi | test/task_test.py | Python | apache-2.0 | 4,097 | 0.000976 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import doctest
from helpers import unittest
from datetime import datetime, timedelta
import luigi
import luigi.task
from luigi.task_register import load_task
class DummyTask(luigi.Task):
param = luigi.Parameter()
bool_param = luigi.BoolParameter()
int_param = luigi.IntParameter()
float_param = luigi.FloatParameter()
date_param = luigi.DateParameter()
datehour_param = luigi.DateHourParameter()
timedelta_param = luigi.TimeDeltaParameter()
list_param = luigi.Parameter(is_list=True)
insignificant_param = luigi.Parameter(significant=False)
class DefaultInsignificantParamTask(luigi.Task):
insignificant_param = luigi.Parameter(significant=False, default='value')
necessary_param = luigi.Parameter(significant=False)
class TaskTest(unittest.TestCase):
def test_tasks_doctest(self):
doctest.testmod(luigi.task)
def test_task_to_str_to_task(self):
params = dict(
param='test',
bool_param=True,
int_param=666,
float_param=123.456,
date_param=datetime(2014, 9, 13).date(),
datehour_param=datetime(2014, 9, 13, 9),
timedelta_param=timedelta(44), # doesn't support seconds
            list_param=['in', 'flames'],
insignificant_param='test')
original = DummyTask(**params)
other = DummyTask.from_str_params(original.to_str_params())
        self.assertEqual(original, other)
def test_task_from_str_insignificant(self):
params = {'necessary_param': 'needed'}
original = DefaultInsignificantParamTask(**params)
other = DefaultInsignificantParamTask.from_str_params(params)
self.assertEqual(original, other)
def test_task_missing_necessary_param(self):
with self.assertRaises(luigi.parameter.MissingParameterException):
DefaultInsignificantParamTask.from_str_params({})
def test_external_tasks_loadable(self):
task = load_task("luigi", "ExternalTask", {})
assert(isinstance(task, luigi.ExternalTask))
def test_id_to_name_and_params(self):
task_id = "InputText(date=2014-12-29)"
(name, params) = luigi.task.id_to_name_and_params(task_id)
self.assertEquals(name, "InputText")
self.assertEquals(params, dict(date="2014-12-29"))
def test_id_to_name_and_params_multiple_args(self):
task_id = "InputText(date=2014-12-29,foo=bar)"
(name, params) = luigi.task.id_to_name_and_params(task_id)
self.assertEquals(name, "InputText")
self.assertEquals(params, dict(date="2014-12-29", foo="bar"))
def test_id_to_name_and_params_list_args(self):
task_id = "InputText(date=2014-12-29,foo=[bar,baz-foo])"
(name, params) = luigi.task.id_to_name_and_params(task_id)
self.assertEquals(name, "InputText")
self.assertEquals(params, dict(date="2014-12-29", foo=["bar", "baz-foo"]))
def test_flatten(self):
flatten = luigi.task.flatten
self.assertEquals(sorted(flatten({'a': 'foo', 'b': 'bar'})), ['bar', 'foo'])
self.assertEquals(sorted(flatten(['foo', ['bar', 'troll']])), ['bar', 'foo', 'troll'])
self.assertEquals(flatten('foo'), ['foo'])
self.assertEquals(flatten(42), [42])
self.assertEquals(flatten((len(i) for i in ["foo", "troll"])), [3, 5])
self.assertRaises(TypeError, flatten, (len(i) for i in ["foo", "troll", None]))
if __name__ == '__main__':
unittest.main()
|
mozillazg/bookmarks | bookmarks/extensions.py | Python | mit | 363 | 0 | # -*- coding: utf-8 -*-
from flask_bcrypt import Bcrypt
from flask_admin import Admin
from flask_cache import Cache
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
admin = Admin(name='Bookmarks')
bcrypt = Bcrypt()
cache = Cache()
db = SQLAlchemy()
migrate = Migrate()
login_manager = LoginManager()
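# These extensions are created unbound on purpose; an application factory is
# expected to attach them later (a sketch, assuming the usual Flask
# app-factory pattern; "create_app" is a hypothetical name):
#   def create_app():
#       app = Flask(__name__)
#       db.init_app(app)
#       login_manager.init_app(app)
#       return app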
|
jabesq/home-assistant | homeassistant/components/emulated_roku/__init__.py | Python | apache-2.0 | 2,359 | 0 | """Support for Roku API emulation."""
import voluptuous as vol
from homeassistant import config_entries, util
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from .binding import EmulatedRoku
from .config_flow import configured_servers
from .const import (
CONF_ADVERTISE_IP, CONF_ADVERTISE_PORT, CONF_HOST_IP, CONF_LISTEN_PORT,
CONF_SERVERS, CONF_UPNP_BIND_MULTICAST, DOMAIN)
SERVER_CONFIG_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_LISTEN_PORT): cv.port,
vol.Optional(CONF_HOST_IP): cv.string,
vol.Optional(CONF_ADVERTISE_IP): cv.string,
vol.Optional(CONF_ADVERTISE_PORT): cv.port,
vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_SERVERS):
vol.All(cv.ensure_list, [SERVER_CONFIG_SCHEMA]),
}),
}, extra=vol.ALLOW_EXTRA)
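# A configuration sketch matching CONFIG_SCHEMA above (values are placeholders;
# per SERVER_CONFIG_SCHEMA only "name" and "listen_port" are required):
#
# emulated_roku:
#   servers:
#     - name: Media Player
#       listen_port: 8060
#       advertise_ip: 192.168.1.10
#       upnp_bind_multicast: false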
async def async_setup(hass, config):
"""Set up the emulated roku component."""
conf = config.get(DOMAIN)
if conf is None:
return True
existing_servers = configured_servers(hass)
for entry in conf[CONF_SERVERS]:
if entry[CONF_NAME] not in existing_servers:
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN,
context={'source': config_entries.SOURCE_IMPORT},
data=entry
))
return True
async def async_setup_entry(hass, config_entry):
"""Set up an emulated roku server from a config entry."""
config = config_entry.data
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
name = config[CONF_NAME]
listen_port = config[CONF_LISTEN_PORT]
host_ip = config.get(CONF_HOST_IP) or util.get_local_ip()
advertise_ip = config.get(CONF_ADVERTISE_IP)
advertise_port = config.get(CONF_ADVERTISE_PORT)
    upnp_bind_multicast = config.get(CONF_UPNP_BIND_MULTICAST)
server = EmulatedRoku(hass, name, host_ip, listen_port,
advertise_ip, advertise_port, upnp_bind_multicast)
hass.data[DOMAIN][name] = server
return await server.setup()
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
name = entry.data[CONF_NAME]
server = hass.data[DOMAIN].pop(name)
return await server.unload()
|
isudox/leetcode-solution | python-algorithm/leetcode/problem_538.py | Python | mit | 1,776 | 0 | """538. Convert BST to Greater Tree
https://leetcode.com/problems/convert-bst-to-greater-tree/
Given a Binary Search Tree (BST), convert it to a Greater Tree such that
every key of the original BST is changed to the original key plus sum of
all keys greater than the original key in BST.
Example:
Input: The root of a Binary Search Tree like this:
5
/ \
2 13
Output: The root of a Greater Tree like this:
18
/ \
20 13
Note: This question is the same as 1038:
https://leetcode.com/problems/binary-search-tree-to-greater-sum-tree/
"""
from common.tree_node import TreeNode
class Solution:
def convert_bst(self, root: TreeNode) -> TreeNode:
seq_node, seq_val, stack = [], [], []
# in-order traversal.
p = root
while p or stack:
if p:
stack.append(p)
p = p.left
else:
node = stack.pop()
seq_node.append(node)
seq_val.append(node.val)
                p = node.right
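        # Rewrite every node except the largest (its value is unchanged):
        # the new value is the suffix sum over the sorted keys. Note this is
        # O(n^2) because of the repeated sum() over a slice.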
for i in range(len(seq_val) - 1):
seq_node[i].val = sum(seq_val[i:])
return root
def convert_bst_1(self, root: TreeNode) -> TreeNode:
p = root
stack, node_seq = [], []
# reversed in-order traversal.
        while p or stack:
if p:
stack.append(p)
p = p.right
else:
node = stack.pop()
node_seq.append(node)
p = node.left
pre_sum = 0
for node in node_seq:
pre_sum += node.val
node.val = pre_sum
return root
    def convert_bst_2(self, root: TreeNode) -> TreeNode:
        # A recursive sketch of the same idea: reversed in-order traversal,
        # threading the running sum of already-visited (greater) keys.
        def visit(node, acc):
            if not node:
                return acc
            acc = visit(node.right, acc) + node.val
            node.val = acc
            return visit(node.left, acc)

        visit(root, 0)
        return root
|
msegado/edx-platform | common/djangoapps/student/tests/test_recent_enrollments.py | Python | agpl-3.0 | 8,149 | 0.003436 | """
Tests for the recently enrolled messaging within the Dashboard.
"""
import datetime
import unittest
import ddt
from django.conf import settings
from django.urls import reverse
from django.utils.timezone import now
from opaque_keys.edx import locator
from pytz import UTC
from six.moves import range, zip
from common.test.utils import XssTestMixin
from course_modes.tests.factories import CourseModeFactory
from openedx.core.djangoapps.site_configuration.tests.test_util import with_site_configuration_context
from student.models import CourseEnrollment, DashboardConfiguration
from student.tests.factories import UserFactory
from student.views import get_course_enrollments
from student.views.dashboard import _get_recently_enrolled_courses
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class TestRecentEnrollments(ModuleStoreTestCase, XssTestMixin):
"""
Unit tests for getting the list of courses for a logged in user
"""
PASSWORD = 'test'
def setUp(self):
"""
Add a student
"""
super(TestRecentEnrollments, self).setUp()
self.student = UserFactory()
self.student.set_password(self.PASSWORD)
self.student.save()
# Old Course
old_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0')
__, enrollment = self._create_course_and_enrollment(old_course_location)
enrollment.created = datetime.datetime(1900, 12, 31, 0, 0, 0, 0, tzinfo=UTC)
enrollment.save()
# New Course
course_location = locator.CourseLocator('Org1', 'Course1', 'Run1')
self.course, self.enrollment = self._create_course_and_enrollment(course_location)
def _create_course_and_enrollment(self, course_location):
""" Creates a course and associated enrollment. """
course = CourseFactory.create(
org=course_location.org,
number=course_location.course,
run=course_location.run
)
enrollment = CourseEnrollment.enroll(self.student, course.id)
return course, enrollment
def _configure_message_timeout(self, timeout):
"""Configure the amount of time the enrollment message will be displayed. """
config = DashboardConfiguration(recent_enrollment_time_delta=timeout)
config.save()
def test_recently_enrolled_courses(self):
"""
Test if the function for filtering recent enrollments works appropriately.
"""
self._configure_message_timeout(60)
# get courses through iterating all courses
courses_list = list(get_course_enrollments(self.student, None, []))
self.assertEqual(len(courses_list), 2)
recent_course_list = _get_recently_enrolled_courses(courses_list)
self.assertEqual(len(recent_course_list), 1)
def test_zero_second_delta(self):
"""
Tests that the recent enrollment list is empty if configured to zero seconds.
"""
self._configure_message_timeout(0)
courses_list = list(get_course_enrollments(self.student, None, []))
self.assertEqual(len(courses_list), 2)
recent_course_list = _get_recently_enrolled_courses(courses_list)
self.assertEqual(len(recent_course_list), 0)
def test_enrollments_sorted_most_recent(self):
"""
Test that the list of newly created courses are properly sorted to show the most
recent enrollments first.
Also test recent enrollment message rendered appropriately for more than two courses.
"""
self._configure_message_timeout(600)
# Create a number of new enrollments and courses, and force their creation behind
# the first enrollment
        courses = []
for idx, seconds_past in zip(list(range(2, 6)), [5, 10, 15, 20]):
course_location = locator.CourseLocator(
'Org{num}'.format(num=idx),
'Course{num}'.format(num=idx),
                'Run{num}'.format(num=idx)
)
course, enrollment = self._create_course_and_enrollment(course_location)
enrollment.created = now() - datetime.timedelta(seconds=seconds_past)
enrollment.save()
courses.append(course)
courses_list = list(get_course_enrollments(self.student, None, []))
self.assertEqual(len(courses_list), 6)
recent_course_list = _get_recently_enrolled_courses(courses_list)
self.assertEqual(len(recent_course_list), 5)
self.assertEqual(recent_course_list[1].course.id, courses[0].id)
self.assertEqual(recent_course_list[2].course.id, courses[1].id)
self.assertEqual(recent_course_list[3].course.id, courses[2].id)
self.assertEqual(recent_course_list[4].course.id, courses[3].id)
self.client.login(username=self.student.username, password=self.PASSWORD)
response = self.client.get(reverse("dashboard"))
# verify recent enrollment message
self.assertContains(
response,
            'Thank you for enrolling in:'
)
self.assertContains(
response,
', '.join(enrollment.course.display_name for enrollment in recent_course_list)
)
def test_dashboard_rendering_with_single_course(self):
"""
Tests that the dashboard renders the recent enrollment message appropriately for single course.
"""
self._configure_message_timeout(600)
self.client.login(username=self.student.username, password=self.PASSWORD)
response = self.client.get(reverse("dashboard"))
self.assertContains(
response,
"Thank you for enrolling in {course_name}".format(course_name=self.course.display_name)
)
def test_dashboard_rendering_with_two_courses(self):
"""
Tests that the dashboard renders the recent enrollment message appropriately for two courses.
"""
self._configure_message_timeout(600)
course_location = locator.CourseLocator(
'Org2',
'Course2',
'Run2'
)
course, _ = self._create_course_and_enrollment(course_location)
self.client.login(username=self.student.username, password=self.PASSWORD)
response = self.client.get(reverse("dashboard"))
courses_enrollments = list(get_course_enrollments(self.student, None, []))
courses_enrollments.sort(key=lambda x: x.created, reverse=True)
self.assertEqual(len(courses_enrollments), 3)
recent_course_enrollments = _get_recently_enrolled_courses(courses_enrollments)
self.assertEqual(len(recent_course_enrollments), 2)
self.assertContains(
response,
"Thank you for enrolling in:".format(course_name=self.course.display_name)
)
self.assertContains(
response,
' and '.join(enrollment.course.display_name for enrollment in recent_course_enrollments)
)
def test_dashboard_escaped_rendering(self):
"""
Tests that the dashboard renders the escaped recent enrollment messages appropriately.
"""
self._configure_message_timeout(600)
self.client.login(username=self.student.username, password=self.PASSWORD)
# New Course
course_location = locator.CourseLocator('TestOrg', 'TestCourse', 'TestRun')
xss_content = "<script>alert('XSS')</script>"
course = CourseFactory.create(
org=course_location.org,
number=course_location.course,
run=course_location.run,
display_name=xss_content
)
CourseEnrollment.enroll(self.student, course.id)
response = self.client.get(reverse("dashboard"))
self.assertContains(response, "Thank you for enrolling in")
# Check if response is escaped
self.assert_no_xss(response, xss_content)
|
andymckay/addons-server | src/olympia/translations/query.py | Python | bsd-3-clause | 5,271 | 0 | import itertools
from django.conf import settings
from django.db import models
from django.utils import translation as translation_utils
from olympia.addons.query import IndexCompiler, IndexQuery
def order_by_translation(qs, fieldname):
"""
Order the QuerySet by the translated field, honoring the current and
fallback locales. Returns a new QuerySet.
The model being sorted needs a get_fallback() classmethod that describes
the fallback locale. get_fallback() can retur | n a string or a Field.
"""
if fieldname.startswith('-'):
desc = True
fieldname = fieldname[1:]
else:
desc = False
    qs = qs.all()
model = qs.model
field = model._meta.get_field(fieldname)
# connection is a tuple (lhs, table, join_cols)
connection = (model._meta.db_table, field.rel.to._meta.db_table,
field.rel.field_name)
# Doing the manual joins is flying under Django's radar, so we need to make
# sure the initial alias (the main table) is set up.
if not qs.query.tables:
qs.query.get_initial_alias()
# Force two new joins against the translation table, without reusing any
# aliases. We'll hook up the language fallbacks later.
# Passing `reuse=set()` force new joins, and passing `nullable=True`
# forces django to make LEFT OUTER JOINs (otherwise django, because we are
# building the query manually, does not detect that an inner join would
# remove results and happily simplifies the LEFT OUTER JOINs to
# INNER JOINs)
qs.query = qs.query.clone(TranslationQuery)
t1 = qs.query.join(connection, join_field=field, reuse=set(),
nullable=True)
t2 = qs.query.join(connection, join_field=field, reuse=set(),
nullable=True)
qs.query.translation_aliases = {field: (t1, t2)}
f1, f2 = '%s.`localized_string`' % t1, '%s.`localized_string`' % t2
name = 'translated_%s' % field.column
ifnull = 'IFNULL(%s, %s)' % (f1, f2)
prefix = '-' if desc else ''
return qs.extra(select={name: ifnull},
where=['(%s IS NOT NULL OR %s IS NOT NULL)' % (f1, f2)],
order_by=[prefix + name])
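# Roughly, the resulting SQL looks like this (alias names t1/t2 are generated
# by Django; shown only to illustrate the IFNULL locale fallback):
#   SELECT ..., IFNULL(t1.`localized_string`, t2.`localized_string`)
#       AS translated_<column>
#   ...
#   WHERE (t1.`localized_string` IS NOT NULL OR t2.`localized_string` IS NOT NULL)
#   ORDER BY translated_<column>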
class TranslationQuery(IndexQuery):
"""
Overrides sql.Query to hit our special compiler that knows how to JOIN
translations.
"""
def clone(self, klass=None, **kwargs):
# Maintain translation_aliases across clones.
c = super(TranslationQuery, self).clone(klass, **kwargs)
c.translation_aliases = self.translation_aliases
return c
def get_compiler(self, using=None, connection=None):
# Call super to figure out using and connection.
c = super(TranslationQuery, self).get_compiler(using, connection)
return SQLCompiler(self, c.connection, c.using)
class SQLCompiler(IndexCompiler):
"""Overrides get_from_clause to LEFT JOIN translations with a locale."""
def get_from_clause(self):
# Temporarily remove translation tables from query.tables so Django
# doesn't create joins against them.
old_tables = list(self.query.tables)
for table in itertools.chain(*self.query.translation_aliases.values()):
self.query.tables.remove(table)
joins, params = super(SQLCompiler, self).get_from_clause()
# fallback could be a string locale or a model field.
params.append(translation_utils.get_language())
if hasattr(self.query.model, 'get_fallback'):
fallback = self.query.model.get_fallback()
else:
fallback = settings.LANGUAGE_CODE
if not isinstance(fallback, models.Field):
params.append(fallback)
# Add our locale-aware joins. We're not respecting the table ordering
# Django had in query.tables, but that seems to be ok.
for field, aliases in self.query.translation_aliases.items():
t1, t2 = aliases
joins.append(self.join_with_locale(t1))
joins.append(self.join_with_locale(t2, fallback))
self.query.tables = old_tables
return joins, params
def join_with_locale(self, alias, fallback=None):
# This is all lifted from the real sql.compiler.get_from_clause(),
# except for the extra AND clause. Fun project: fix Django to use Q
# objects here instead of a bunch of strings.
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
mapping = self.query.alias_map[alias]
# name, alias, join_type, lhs, lhs_col, col, nullable = mapping
name, alias, join_type, lhs, join_cols, _, join_field = mapping
lhs_col = join_field.column
rhs_col = join_cols
alias_str = '' if alias == name else (' %s' % alias)
if isinstance(fallback, models.Field):
fallback_str = '%s.%s' % (qn(self.query.model._meta.db_table),
qn(fallback.column))
else:
fallback_str = '%s'
return ('%s %s%s ON (%s.%s = %s.%s AND %s.%s = %s)' %
(join_type, qn(name), alias_str,
qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col),
qn(alias), qn('locale'), fallback_str))
|
honahursey/pyFDA | work/min_order_common.py | Python | apache-2.0 | 1,902 | 0.028917 | # -*- coding: utf-8 -*-
"""
Common design parameters for minimum order design methods
@author: Christian Muenker
"""
from __future__ import print_function, division, unicode_literals
#from importlib import import_module
#import filterbroker as fb
class min_order_common(object):
def __init__(self):
self.name = {'common':'Common filter params'}
# message for min. filter order response types:
msg_min = ("Enter the maximum pass band ripple, minimum stop band "
"attenuation and the corresponding corner frequencies.")
# VISIBLE widgets for all man. / min. filter order response types:
        vis_min = ['fo','fspecs','aspecs'] # minimum filter order
# ENABLED widgets for all man. / min. filter order response types:
enb_min = ['fo','fspecs','aspecs'] # minimum filter order
# common parameters for all man. / min. filter order response types:
par_min = ['f_S', 'A_PB', 'A_SB'] # enabled widget for min. filt. order
        # Common data for all man. / min. filter order response types:
# This data is merged with the entries for individual response types
# (common data comes first):
self.com = {"min":{"enb":enb_min, "msg":msg_min, "par": par_min}}
self.rt = {
"LP": {"min":{"par":['f_S','A_PB','A_SB','F_PB','F_SB']}},
"HP": {"min":{"par":['f_S','A_PB','A_SB','F_SB','F_PB']}},
"BP": {"min":{"par":['f_S','A_PB','A_SB','A_SB2',
'F_SB','F_PB','F_PB2','F_SB2']}},
"BS": {"min":{"par":['f_S','A_PB','A_SB','A_PB2',
'F_PB','F_SB','F_SB2','F_PB2']}}
# "HIL": {"man":{"par":['F_SB', 'F_PB', 'F_PB2', 'F_SB2','A_SB','A_PB','A_SB2']}}
#"DIFF":
}
|
Wiredcraft/maestro-ng | maestro/__main__.py | Python | lgpl-3.0 | 7,128 | 0.000281 | #!/usr/bin/env python
# Copyright (C) 2013-2014 SignalFuse, Inc.
#
# Docker container orchestration utility.
from __future__ import print_function
import argparse
import jinja2
import logging
import os
import sys
import traceback
import yaml
from . import exceptions, maestro
from . import name, version
DEFAULT_MAESTRO_FILE = 'maestro.yaml'
def load_config_from_file(filename):
"""Load a config from the given file.
Args:
filename (string): Path to the YAML environment description
configuration file to load. Use '-' for stdin.
Returns:
A python data structure corresponding to the YAML configuration.
"""
if filename == '-':
template = jinja2.Template(sys.stdin.read())
else:
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(filename)),
extensions=['jinja2.ext.with_'])
try:
template = env.get_template(os.path.basename(filename))
except jinja2.exceptions.TemplateNotFound:
raise exceptions.MaestroException(
'Environment description file {} not found!'.format(filename))
except:
raise exceptions.MaestroException(
'Error reading environment description file {}!'.format(
filename))
return yaml.load(template.render(env=os.environ))
def create_parser():
"""Create the Maestro argument parser."""
parser = argparse.ArgumentParser(prog=name, description=(
'{} v{}, Docker container orchestrator.'.format(
name.title(), version)))
parser.add_argument(
'-f', '--file', metavar='FILE',
default=DEFAULT_MAESTRO_FILE,
help=('read environment description from FILE ' +
'(use - for stdin, defaults to ./{})'
.format(DEFAULT_MAESTRO_FILE)))
parser.add_argument(
'-v', '--version', action='version',
version='{}-{}'.format(name, version),
help='show program version and exit')
subparsers = parser.add_subparsers(
dest='command',
metavar='{{{}}}'.format(','.join(maestro.AVAILABLE_MAESTRO_COMMANDS)))
common = argparse.ArgumentParser(add_help=False)
common.add_argument(
'things', nargs='*', metavar='thing',
help='container(s) or service(s) to display')
concurrent = argparse.ArgumentParser(add_help=False)
concurrent.add_argument(
'-c', '--concurrency', metavar='LIMIT', type=int, default=None,
help='limit how many containers can be acted on at the same time')
concurrent.add_argument(
'-d', '--with-dependencies', action='store_true',
help='include dependencies')
concurrent.add_argument(
'-i', '--ignore-dependencies', action='store_true',
help='ignore dependency order')
with_refresh = argparse.ArgumentParser(add_help=False)
refresh_or_reuse_group = with_refresh.add_mutually_exclusive_group()
refresh_or_reuse_group.add_argument(
'-r', '--refresh-images', action='store_true',
help='force refresh of container images from registry')
refresh_or_reuse_group.add_argument(
'--reuse', action='store_true',
help='reuse existing container if it exists')
# status
subparser = subparsers.add_parser(
parents=[common, concurrent],
name='status',
description='Display container status',
help='display container status')
subparser.add_argument(
'-F', '--full', action='store_true',
help='show full status with port state')
# pull
subparser = subparsers.add_parser(
parents=[common, concurrent],
name='pull',
description='Pull container images from registry',
help='pull container images from registry')
# start
subparser = subparsers.add_parser(
parents=[common, concurrent, with_refresh],
name='start',
description='Start services and containers',
help='start services and containers')
# stop
subparser = subparsers.add_parser(
parents=[common, concurrent],
name='stop',
description='Stop services and containers',
help='stop services and containers')
# restart
subparser = subparsers.add_parser(
parents=[common, concurrent, with_refresh],
name='restart',
description='Restart services and containers',
help='restart services and containers')
subparser.add_argument(
'--step-delay', type=int, default=0,
help='delay, in seconds, between each container')
subparser.add_argument(
'--stop-start-delay', type=int, default=0,
help='delay, in seconds, between stopping and starting each container')
subparser.add_argument(
'--only-if-changed', action='store_true',
help='only restart if the container image was updated')
# clean
subparser = subparsers.add_parser(
parents=[common, concurrent],
name='clean',
description='Cleanup and remove stopped containers',
help='remove stopped containers')
# logs
subparser = subparsers.add_parser(
parents=[common],
name='logs',
description='Show logs for a container',
help='show logs from a container')
subparser.add_argument(
'-F', '--follow', action='store_true',
help='follow logs as they are generated')
subparser.add_argument(
'-n', metavar='LINES', type=int,
help='Only show the last LINES lines for logs')
# deptree
subparser = subparsers.add_parser(
parents=[common],
name='deptree',
description='Display the service dependency tree',
help='show the dependency tree')
subparser.add_argument(
'-r', '--recursive', action='store_true',
help='show dependencies recursively (possible duplicates)')
# complete
subparser = subparsers.add_parser(
name='complete',
description='Auto-complete helper',
help='shell auto-completion helper')
subparser.add_argument(
'tokens', nargs='*',
help='command tokens')
    return parser
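# Typical invocations (a sketch; "web" stands for any service defined in the
# environment description file):
#   maestro -f maestro.yaml status
#   maestro start -c 4 --with-dependencies web
#   maestro logs -F -n 100 web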
def main(args=None, config=None):
options = create_parser().parse_args(args)
# Only helps with Python3
if not options.command:
options.command = 'status'
if config is None:
config = load_config_from_file(options.file)
# Shutup urllib3, wherever it comes from.
    (logging.getLogger('requests.packages.urllib3.connectionpool')
.setLevel(logging.WARN))
(logging.getLogger('urllib3.connectionpool')
.setLevel(logging.WARN))
try:
c = maestro.Conductor(config)
if options.command != 'complete' and not options.things:
options.things = c.services.keys()
options.with_dependencies = not options.ignore_dependencies
getattr(c, options.command)(**vars(options))
return 0
except KeyboardInterrupt:
pass
except:
traceback.print_exc()
return 1
if __name__ == '__main__':
sys.exit(main())
|
Remolten/galena | tests/conftest.py | Python | bsd-3-clause | 290 | 0 | import pytest
import galena
@pytest.fixture
def game():
return galena.Game()
@pytest.fixture
def entity(game):
    return game.create_entity()
@pytest.fixture
def entity2(game):
return game.create_entity()
@pytest.fixture
def entity3(game):
return game.create_entity()
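# A sketch of how these fixtures compose in a test ("game.entities" is a
# hypothetical attribute used only for illustration):
#   def test_create_entity(game, entity):
#       assert entity in game.entities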
|
glaserti/LibraryTwitter | Python/4 - BaseStats.py | Python | mit | 16,267 | 0.011437 | #
# Basic Statistics on the Twitter Accounts
#
# In this section, some basic statistics for the Twitter Accounts of the given groups of libraries (i.e. National libraries, University libraries, Public libraries) will be collected.
#
# The functions will return a list of dictionaries and save it as a CSV to the cwd.
#
# The dictionaries have as keys:
#
# - 'created_at' ( = the Twitter Time Stamp),
# - 'created_at_sec' ( = the date in seconds (from 1970-01-01),
# - 'days' (= the number of days since created_at),
# - 'days_since_last_tweet',
# - 'followers_count',
# - 'friends_count',
# - 'id_str' ( = the Twitter ID as a string),
# - 'location' ( = if a location is given in the description of the account),
# - 'screen_name' ( = the Twitter handle/username),
# - 'statuses_count' ( = Nr. of Tweets),
# - 'tweets_per_day',
# - 'tweets_per_year'
#
# Finally, there is a Report section, in which an overview is provided. For each library group, the following will be printed:
#
# - The number of libraries,
# - the median of the groups' Tweets per day,
# - the oldest and latest library @ Twitter with their Tweets per day ratio,
# - a list of no longer actively tweeting libraries
# - the libraries with the most and least Tweets and
# - a summary for each library.
#
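# A single result dictionary looks roughly like this (all values made up):
#   {'screen_name': 'examplelib', 'id_str': '12345', 'followers_count': 980,
#    'friends_count': 120, 'statuses_count': 4300,
#    'created_at': 'Mon Apr 06 12:00:00 +0000 2009', 'created_at_sec': 1239019200.0,
#    'days': 2100, 'days_since_last_tweet': 3, 'location': 'berlin',
#    'tweets_per_day': 2.05, 'tweets_per_year': 747.83}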
#
# Function definitions
#
# authenticating @ Twitter
# Function definition taken from Mining the Social Web, 2. Ed.
# cf. https://github.com/ptwobrussell/Mining-the-Social-Web-2nd-Edition
'''
Go to http://dev.twitter.com/apps/new to create an app and get values
for these credentials, which you'll need to provide in place of these
empty string values that are defined as placeholders.
See https://dev.twitter.com/docs/auth/oauth for more information
on Twitter's OAuth implementation.
'''
#importing libraries
import twitter
CONSUMER_KEY = ''  # fill in your own credentials (see docstring above)
CONSUMER_SECRET = ''
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
#import & export CSV
import csv
def impCSV(input_file):
'''
    input_file = csv with keys: "URL", "Twitter"
output = list of dictionaries
'''
    f = open(input_file, 'r')
d = csv.DictReader(f)
LoD = [] # list of dictionaries
for row in d:
LoD.append(row)
f.close()
return LoD
def exp2CSV(listOfDict, filename):
'''
arguments = list of dictionaries, filename
output = saves file to cwd (current working directory)
'''
#creating the filename of the csv with current datestamp
import datetime
datestamp = datetime.datetime.now().strftime('%Y-%m-%d')
outputfile = filename[:-4]+ '_' + datestamp + '.csv'
keyz = listOfDict[0].keys()
f = open(outputfile,'w')
dict_writer = csv.DictWriter(f,keyz)
dict_writer.writer.writerow(keyz)
dict_writer.writerows(listOfDict)
f.close()
#
# Functions for the Data Mining
#
#importing libraries
import json #for pretty printing
import time #for calculating Tweets per day
import operator #for sorting dictionaries
from collections import Counter #for turning lists to dictionaries etc.
from prettytable import PrettyTable #for pretty printing in a table
# getting the ListOfScreenNames
def getLoSN(csvFile):
'''
input = csv filename of list of dictionaries with a key "Twitter" (where the Screenname is saved)
returns a list of tuples with t[0] = libLocation, t[1] = Twitter screenname
'''
LoD = impCSV(csvFile)
ListOfScreenNamesLocationTuples = []
for i in LoD:
ListOfScreenNamesLocationTuples.append((i['Ort'], i['Twitter']))
return ListOfScreenNamesLocationTuples
#getting basic infos for a given account incl. last status update
# users.lookup = max. 100 Anfragen pro Session! Not a problem in this section of the queries.
def AccountInfo(L):
'''
input = list of tuples with str of screen_names and location
output = list of tuples with t[0] = libLocation, t[1] = lists of dictionaries
'''
outputList = []
    errorList = [] # TODO: add real error checking (e.g. a try/except around the lookup)
for n in L:
search_results = twitter_api.users.lookup(screen_name=n[1])
outputList.append((n[0], search_results))
return outputList
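# Chained together (sketch; 'libs.csv' is a placeholder filename whose rows
# must contain 'Ort' and 'Twitter' columns):
#   stats = baseStats(AccountInfo(getLoSN('libs.csv')))
#   exp2CSV(stats, 'libs.csv')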
# getting some basic stats for the screen_names
def baseStats(AccountInfoList):
'''
input = return list from AccountInfo(L)
output: list of dictionaries with screenName, UserID, nrOfFollowers, nrOfFriends,
nrOfStatusUpdates, tweetsSince, tweetsPerDay, and tweetsPerYear
'''
baseStatsList = []
for e in range(len(AccountInfoList)):
newDict = {} #creating a new dictionary for each account
screenName = AccountInfoList[e][1][0]['screen_name'].lower() # cf. @ Notebook 3 - Twitter CSV files
UserID = AccountInfoList[e][1][0]['id_str'].encode('utf-8')
nrOfFollowers = AccountInfoList[e][1][0]['followers_count'] #How many Followers?
nrOfFriends = AccountInfoList[e][1][0]['friends_count'] #How many Following/Friends?
nrOfStatusUpdates = AccountInfoList[e][1][0]['statuses_count']
tweetsSince = AccountInfoList[e][1][0]['created_at'].encode('utf-8')
#new in Dict:
DateOfLastTweet = AccountInfoList[e][1][0]['status']['created_at'].encode('utf-8')
#normalizing the location
'''
# This code is only necessary if the Twitter location is used instead of the DBS location
# location = AccountInfoList[e][1][0]['location'].encode('utf-8') #get the location (in case the screen_name isn't sufficient)
# list of words to remove from the location's description (Bundesländer & Country)
removeWords = ['Deutschland', 'Germany', 'Baden-Württemberg', 'Bayern', 'Brandenburg', 'Hessen', 'Mecklenburg-Vorpommern',
'Niedersachsen', 'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', 'Sachsen',
'Sachsen-Anhalt', 'Schleswig-Holstein','Thüringen'] #ausser 'Berlin', 'Bremen', 'Hamburg'!
#normalizing location (lowercase, stripping of Germany etc.) ("Oldenburg, Germany", "Hessen, Kassel"))
location = (location.replace(",", "")).lower() #remove separator and normalize to lowercase
for e in removeWords: #remove Bundesland and/or Country
if e.lower() in location:
location = location.strip(e.lower())
location = location.strip() #strip off white space
'''
location = AccountInfoList[e][0].lower()
idxLoc1 = location.find('/') # strip off everything from '/' on to the right (e.g. 'Frankfurt/M')
idxLoc2 = location.find('-') # strip off everything from '-' on to the right (e.g. 'Duisburg-Essen')
if idxLoc1 != -1:
location = location[:idxLoc1]
if idxLoc2 != -1:
location = location[:idxLoc2]
if 'sporths' in location:
location = location.strip('sporths') # the lib of KölnSportHS has given that as their location!
#calculating Tweets per day and year
t0 = time.mktime(time.strptime(tweetsSince, "%a %b %d %H:%M:%S +0000 %Y"))#returns date in seconds (from 1970-01-01)
t1 = time.time() #returns current date in seconds (from 1970-01-01)
diff = int(round((t1 - t0)/86400)) #calculates the difference in days (86400 sec per day)
tweetsPerDay = round((float(nrOfStatusUpdates)/diff),2) #returns nr of Tweets per day as a float
diffYear = round((diff/365.0),2)
tweetsPerYear = round((float(nrOfStatusUpdates)/diffYear),2) #returns nr of Tweets per year as a float
#calculating time since last Tweet
LastTweet_t0 = time.mktime(time.strptime(DateOfLastTweet, "%a %b %d %H:%M:%S +0000 %Y"))
daysSinceLastTweet = int(round((t1 - LastTweet_t0)/86400))
#writing to the dictionary
newDict['screen_name'] = screenName
newDict['id_str'] |
we-inc/mms-snow-white-and-the-seven-pandas | webserver/config/settings.py | Python | mit | 4,453 | 0.001347 | """
Django settings for webserver project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os, sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "apps"))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cd8=h&(&^#m95znusg4-f65vl6t#e%_wpf=nn6a^xnuh2pn5pd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.users',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'apps.ping',
'apps.lessors',
'apps.bank_accounts',
'apps.products',
'apps.markets',
'apps.tags',
'apps.commons',
'apps.booths',
'apps.reservations',
'apps.payments',
'apps.reports',
'apps.ratings',
]
AUTH_USER_MODEL = 'users.User'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# REST framework
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'PAGE_SIZE': 12,
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Email credentials
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_PASSWORD = 'whale123mart'
EMAIL_HOST_USER = 'whalemart.noti@gmail.com'
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
|
sserkez/ocelot | utils/sim_plot.py | Python | gpl-3.0 | 4,107 | 0.015827 | import sys
from ocelot.adaptors.genesis import *
from ocelot.cpbd.elements import Element, Quadrupole, RBend, Drift, Undulator
from ocelot import MagneticLattice
from ocelot.cpbd.beam import Beam
from ocelot.cpbd.optics import *
import numpy as np  # np/plt may also arrive via the star imports above; explicit is safer
import numpy.fft as fft
from sim_info import SimInfo, RunInfo
#params = {'backend': 'ps', 'axes.labelsize': 18, 'text.fontsize': 18, 'legend.fontsize': 18, 'xtick.labelsize': 18, 'ytick.labelsize': 18, 'text.usetex': True}
#rcParams.update(params)
#rc('text', usetex=True) # required to have greek fonts on redhat
import argparse
h = 4.135667516e-15
c = 299792458.0
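# Example: for xlamds = 1e-9 m (1 nm) the central photon energy is
# h * c / xlamds = 4.1357e-15 * 2.9979e8 / 1e-9 ~ 1239.8 eV.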
parser = argparse.ArgumentParser(description='FEL simulation postprocessor')
#parser.add_argument('--submit', help='submit to main index file', action='store_true')
parser.add_argument('--path', help='path to the experiment', default='./')
parser.add_argument('--stage', help='undulator/seeding stages 1 through 5', default='1')
parser.add_argument('--range', help='range of runs in the form i1:i2')
parser.add_argument('--field_file', help='read in field file', action='store_true')
args = parser.parse_args()
run_start, run_end = [int(i) for i in args.range.split(':') ]
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_xlabel('Time [fs]')
ax1.set_ylabel('Power [W]')
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.set_xlabel('Photon Energy [eV]')
ax2.set_ylabel('Spectrum [arb. units]')
ax2.get_xaxis().get_major_formatter().set_useOffset(False)
ax3 = ax2.twiny()
ax3.set_xlabel('Wavelength [nm]')
power_av = None
spec_av = None
runs = xrange(run_start, run_end+1)
for run_id in runs:
run_dir = args.path + '/run_' + str(run_id)
if args.stage in ['1','3','5']:
run_file = run_dir + '/run.' + str(run_id) + '.s' + str(args.stage) + '.gout'
if args.stage == '5' : run_file = run_dir + '/run.' + str(run_id) + '.gout'
print 'reading', run_file
g = readGenesisOutput(run_file)
field_file = run_file + '.dfl'
if args.field_file:
slices = readRadiationFile(fileName=field_file, npoints=g('ncar'))
P = np.zeros_like(slices[:,0,0])
for i in xrange(len(P)):
P[i] = sum( np.abs(np.multiply(slices[i,:,:], slices[i,:,:].conjugate())) )
t = np.linspace(g.t[0], g.t[-1], len(P))
else:
P = g.power_int
t = g.t
w_l_m = g('xlamds')
w_l_ev = h * c / g('xlamds')
x = np.roll(g.freq_ev, len(g.freq_ev)/2)+ w_l_ev
y = np.roll( np.abs(g.spec)**2, len(g.freq_ev)/2)
else:
run_file = run_dir + '/run.' + str(run_id) + '.s' + str( int(args.stage) - 1) + '.gout'
field_file = 'tmp' + str(args.stage) + '.dfl'
print 'reading', run_file, 'and', field_file
g = readGenesisOutput(run_file)
slices = readRadiationFile(fileName=run_dir + '/' + field_file, npoints=g('ncar'))
P = np.zeros_like(slices[:,0,0])
spec = np.zeros_like(slices[:,0,0])
for i in xrange(len(P)):
P[i] = sum( np.abs(np.multiply(slices[i,:,:], slices[i,:,:].conjugate())) )
t = np.linspace(g.t[0], g.t[-1], len(P))
w_l_m = g('xlamds')
w_l_ev = h * c / g('xlamds')
#x = np.roll(g.freq_ev, len(g.freq_ev)/2)+ w_l_ev
spec = fft.fft(slices[:,int( g('ncar')/2),int( g('ncar')/2)])
y = np.abs(spec)**2
        x = h * fft.fftfreq(len(spec), d=g('zsep') * g('xlamds') / c) + w_l_ev
    if power_av is None:  # "== None" misbehaves once power_av is an ndarray
power_av = P / len(runs)
else:
        power_av += P / len(runs)
p1, = ax1.plot(t, P, color='black',alpha=0.4)
    if spec_av is None:
spec_av = y / len(runs)
else:
spec_av += y / len(runs)
p2, = ax2.plot(x, y, color='black', alpha = 0.4)
ax2.set_xlim(x[0],x[-1])
ax3.set_xlim(x[0],x[-1])
x_ticks = ax2.get_xticks()[1:]
    x2 = h*c/(x_ticks) * 1.e9 # coordinates in nm
ax3.set_xticks(x_ticks)
ax3.set_xticklabels(["%.4f" % z for z in x2])
ax1.plot(t, power_av, 'b')
ax2.plot(x, spec_av, 'b')
plt.show()
|
dpdani/csu | src/core/user.py | Python | gpl-3.0 | 7,518 | 0.029662 | # -*- coding: utf-8 -*-
"""
This module implements users.
"""
import os
from persistent import Persistent
import src.core.db as db
import src.core.exc as exc
import src.core.utils as utils
import src.core.security as security
import random
import time
class User(object):
def __init__(self):
self.__set_to_none()
def __set_to_none(self):
self.db = None
self.app_db = None
self.app_db_data = None
self.id = None
self.password_hash = None
self.password_verified = None
self.user_data = None # pdict
self.name = None
self.plugins = None # plugins are bound to Users
# TODO: create a list of allowed plugins.
def login(self, _id, password):
"""
Shorthand function for load, verify_password and load_user_db.
        Automatically retrieves the app_db_data of the given id.
"""
if self.app_db is None:
self.connect_to_appdb()
self.load(self.app_db.root['Users'][_id])
if self.verify_password(password):
self.load_user_db(password)
return self.password_verified
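    # Usage sketch (hypothetical id/password; db.user_db holds the opened
    # database after a successful login):
    #   u = User()
    #   if u.login(user_id, 'secret'):
    #       ...  # work with u.user_data / db.user_db
    #       u.close('secret')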
def close(self, password, save_plugins=True, set_to_none=True):
"""
Argument save_plugins will be directly passed to cDB.
Argument password will be used for authentication and key encryption.
        Returns True if the user database was closed successfully, False otherwise.
Regardless of database closing, if set_to_none is True,
internal variables will be erased.
"""
closed = False
if db.user_db is not None:
if self.verify_password(password):
db.user_db.close(save_plugins=save_plugins, encrypt=True, key=password)
closed = True
else:
raise exc.USR_ClosingUnverifiedUser()
db.user_db = None
if set_to_none:
self.__set_to_none()
return closed
def delete_user(self, password):
"""
        Removes user from app_db and deletes its database files.
User has to be logged in first.
User database will be closed in the process.
This function does not explicitly handle user closing (__set_to_none).
"""
if self.app_db is None:
self.connect_to_appdb()
if self.id is None:
raise exc.USR_DeletingUninitilizedUser()
if not self.verify_password(password):
raise exc.USR_DeletingUnverifiedUser()
self.close_db()
del self.app_db.root['Users'][self.id]
return self.delete_files()
def save_user(self):
"""
Saves user global information (specifically: in
'User-Info' and in 'Users'). Does not save data
in the user's database (handled by User.close).
"""
if db.user_db is None:
raise exc.USR_SavingUserOnUnitilizedDb()
db.user_db.root['User-Info'] = self.user_data
self.save_dbdata(save_in_db=True)
def create_user(self, name, password):
"""
Creates the user with the given password and name and a new randomly generated id.
Also loads the database db.user_db.
User doesn't get saved by this function either in the application database
or in the user database. It has to be saved explicitly at a later
        moment. The user database gets initialized anyway even though it doesn't get saved.
"""
if type(password) != str:
raise exc.USR_BadPasswordType(password)
self.password_hash = security.pwd_context.encrypt(password)
if not security.pwd_context.verify(password, self.password_hash):
raise exc.USR_EncryptionFailureOnCreatingUser()
self.password_verified = True
self.id = generate_new_id()
self.name = name
self.init_db(decrypt=False)
self.user_data = utils.pdict()
self.user_data.update({
"CreationDay": time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
})
def load(self, app_db_data):
"""
This function loads the given folder (UserDataAppDB) into this User instance.
Essentially loads the user id and password hash that needs to be verified.
The password doesn't get verified here; that must happen at a later moment.
"""
if app_db_data.__class__ != UserDataAppDB:
raise exc.USR_BadFolderClass(app_db_data)
self.retrieve_dbdata(app_db_data)
def verify_password(self, password):
"""
This function verifies the password argument with the loaded password hash.
Returns True in case the password has been correctly verified, False otherwise.
If no password hash has been loaded, None will be returned.
"""
self.password_verified = None
if self.password_hash is not None:
self.password_verified = security.pwd_context.verify(password, self.password_hash)
return self.password_verified
def connect_to_appdb(self):
"""
Connects this user to the application database
and stores it in User.app_db; raises USR_AppDbClosed
if the application database has not been opened yet.
"""
if db.app_db is None:
raise exc.USR_AppDbClosed()
self.app_db = db.app_db
def load_user_db(self, password, decrypt=True):
if not self.password_verified:
raise exc.USR_LoadingUnverifiedUser()
self.init_db(decrypt=decrypt, password=password)
self.user_data = db.user_db.root['User-Info']
def retrieve_dbdata(self, app_db_data=None):
"""
Retrieves data from self.app_db_data.
For lazy people, app_db_data can be directly passed
to this function and self.app_db_data will automatically
be set to it.
"""
if app_db_data is not None:
self.app_db_data = app_db_data
if self.app_db_data is None:
raise exc.USR_AppDbDataNone()
self.id = self.app_db_data.id
self.name = self.app_db_data.name
self.password_hash = self.app_db_data.password_hash
def save_dbdata(self, save_in_db=False):
"""
Saves global user data to self.app_db_data.
For lazy people, if save_in_db = True, then
self.app_db_data will be saved in the application
database.
"""
if self.app_db is None:
self.connect_to_appdb()
self.app_db_data = UserDataAppDB()
self.app_db_data.id = self.id
self.app_db_data.name = self.name
self.app_db_data.password_hash = self.password_hash
if save_in_db:
self.app_db.root['Users'][self.id] = self.app_db_data
def delete_files(self):
"""
Deletes database files related to this user.
If a file doesn't exist the os.remove() call won't be executed.
A list containing the deleted files paths will be returned.
"""
base_path = os.path.join(utils.DATABASES_PATH, name_id(self.id)+".csu_user_db")
deleted = []
paths = [ # ZODB usually creates these files for one database.
base_path,
base_path + ".index",
base_path + ".lock",
base_path + ".tmp"
]
for path in paths:
if os.path.exists(path):
os.remove(path)
deleted.append(path)
return deleted
def init_db(self, decrypt=True, password=None):
"""
Initializes the user database and stores it in User.db.
:return: self.db
"""
self.db = db.init_user_db(name_id(self.id), decrypt=decrypt, key=password)
return self.db
def close_db(self):
"""
Closes User.db.
"""
return self.db.close()
class UserDataAppDB(Persistent):
def __init__(self, id=None, create=False):
if type(id) != int and id is not None:
raise exc.USR_BadIdType(id)
self.__set_to_none()
if create:
self.id = generate_new_id()
else: self.id = id
def __set_to_none(self):
self.id = None
self.password_hash = None
self.name = None
def generate_new_id():
"""
Generates a new id that isn't already present in the app_db.
If app_db is unavailable, the set of unavailable ids is
treated as empty.
"""
if db.app_db is not None:
unavailable_ids = db.app_db.root['Users'].children.keys()
else:
unavailable_ids = []
while True:
    new_id = random.randrange(10000, 99999)
    # keep drawing until the candidate id is not already taken
    if new_id not in unavailable_ids:
        return new_id
def name_id(id):
"""
Shorthand function for "user"+str(id).
"""
return "user" + str(id) |
icomms/wqmanager | apps/aquatest_reports/urls.py | Python | bsd-3-clause | 416 | 0.007212 | from django.conf.urls.defaults import *
## reports view
urlpatterns = patterns('aquatest_reports.views',
(r'^reports$', 'reports'),
(r'^sampling_points$', 'sampling_points'),
(r'^report_testers$', 'testers'),
(r'^date_range$', 'date_range'),
(r'^create_report$', 'create_report'),
(r'^export_csv$', 'export_csv'),
| (r'^export_pdf$', 'pdf_view'),
(r'^parameters$', 'paramete | rs'),
)
|
melinath/django-kiki | kiki/models.py | Python | bsd-2-clause | 8,088 | 0.039194 | import base64
import cPickle as pickle
import datetime
from email import message_from_string
from email.utils import getaddresses
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.db import models
from django.db.models import Q, F
from django.utils import simplejson as json
from django.utils.encoding import smart_str
from kiki.message import KikiMessage
from kiki.validators import validate_local_part, validate_not_command
class ListUserMetadata(models.Model):
UNCONFIRMED = 0
SUBSCRIBER = 1
MODERATOR = 2
BLACKLISTED = 3
STATUS_CHOICES = (
(UNCONFIRMED, u'Unconfirmed'),
(SUBSCRIBER, u'Subscriber'),
(MODERATOR, u'Moderator'),
(BLACKLISTED, u'Blacklisted'),
)
user = models.ForeignKey(User)
mailing_list = models.ForeignKey('MailingList')
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, default=UNCONFIRMED, db_index=True)
def __unicode__(self):
return u"%s - %s - %s" % (self.user, self.mailing_list, self.get_status_display())
class Meta:
unique_together = ('user', 'mailing_list')
class MailingListManager(models.Manager):
def for_site(self, site):
return self.filter(site=site)
def for_addresses(self, addresses):
"""
Takes an iterable of email addresses and returns a queryset of mailing lists attached to the current site with matching local parts.
"""
site = Site.objects.get_current()
local_parts = []
for addr in addresses:
addr = addr.rsplit('@', 1)
if addr[1] == site.domain:
local_parts.append(addr[0])
if not local_parts:
return self.none()
return self.filter(domain=site, local_part__in=local_parts)
class MailingList(models.Model):
"""
This model contains all options for a mailing list, as well as some helpful
methods for accessing subscribers, moderators, etc.
"""
objects = MailingListManager()
MODERATORS = "mod"
SUBSCRIBERS = "sub"
ANYONE = "all"
PERMISSION_CHOICES = (
(MODERATORS, 'Moderators',),
(SUBSCRIBERS, 'Subscribers',),
(ANYONE, 'Anyone',),
)
name = models.CharField(max_length=50)
subject_prefix = models.CharField(max_length=10, blank=True)
local_part = models.CharField(max_length=64, validators=[validate_local_part, validate_not_command])
domain = models.ForeignKey(Site)
description = models.TextField(blank=True)
who_can_post = models.CharField(max_length=3, choices=PERMISSION_CHOICES, default=SUBSCRIBERS)
self_subscribe_enabled = models.BooleanField(verbose_name='self-subscribe enabled', default=True)
moderation_enabled = models.BooleanField(help_text="If enabled, messages that would be rejected will be marked ``Requires Moderation`` and an email will be sent to the list's moderators.", default=False)
# If is_anonymous becomes an option, the precooker will need to handle some anonymizing.
#is_anonymous = models.BooleanField()
users = models.ManyToManyField(
User,
related_name = 'mailinglists',
blank = True,
null = | True,
through = ListUserMetadata
)
messages = models.ManyToManyField(
'Message',
related_name = 'mailinglists',
blank = True,
null = True,
through = 'ListMessage'
)
@property
def address(self):
return "%s@%s" % (self.local_part, self.domain.domain)
|
def _list_id_header(self):
# Does this need to be a byte string?
return smart_str(u"%s <%s.%s>" % (self.name, self.local_part, self.domain.domain))
def __unicode__(self):
return self.name
def clean(self):
validate_email(self.address)
# As per RFC 2919, the list_id_header has a max length of 255 octets.
if len(self._list_id_header()) > 254:
# Allow 4 extra spaces: the delimiters, the space, and the period.
raise ValidationError("The list name, local part, and site domain name can be at most 250 characters long together.")
def get_recipients(self):
"""Returns a queryset of :class:`User`\ s that should receive this message."""
qs = User.objects.filter(is_active=True)
qs = qs.filter(listusermetadata__mailing_list=self, listusermetadata__status__in=[ListUserMetadata.SUBSCRIBER, ListUserMetadata.MODERATOR])
return qs.distinct()
def _is_email_with_status(self, email, status):
if isinstance(email, basestring):
kwargs = {'user__email__iexact': email}
elif isinstance(email, User):
kwargs = {'user': email}
else:
return False
try:
self.listusermetadata_set.get(status=status, **kwargs)
except ListUserMetadata.DoesNotExist:
return False
return True
def is_subscriber(self, email):
return self._is_email_with_status(email, ListUserMetadata.SUBSCRIBER)
def is_moderator(self, email):
return self._is_email_with_status(email, ListUserMetadata.MODERATOR)
def can_post(self, email):
if self.who_can_post == MailingList.ANYONE:
return True
if self.who_can_post == MailingList.SUBSCRIBERS and self.is_subscriber(email):
return True
if self.is_moderator(email):
return True
return False
class ProcessedMessageModel(models.Model):
"""
Encapsulates the logic required for storing and fetching pickled EmailMessage objects. This should eventually be replaced with a custom model field.
"""
processed_message = models.TextField(help_text="The processed form of the message at the current stage (pickled).", blank=True)
# Store the message as a base64-encoded pickle dump a la django-mailer.
def set_processed(self, msg):
self.processed_message = base64.encodestring(pickle.dumps(msg, pickle.HIGHEST_PROTOCOL))
self._processed = msg
def get_processed(self):
if not hasattr(self, '_processed'):
self._processed = pickle.loads(base64.decodestring(self.processed_message))
return self._processed
class Meta:
abstract = True
class Message(ProcessedMessageModel):
"""
Represents an email received by Kiki. Stores the original received message as well as a pickled version of the processed message.
"""
UNPROCESSED = 'u'
PROCESSED = 'p'
FAILED = 'f'
STATUS_CHOICES = (
(UNPROCESSED, 'Unprocessed'),
(PROCESSED, 'Processed'),
(FAILED, 'Failed'),
)
message_id = models.CharField(max_length=255, unique=True)
#: The message_id of the email this is in reply to.
# in_reply_to = models.CharField(max_length=255, db_index=True, blank=True)
from_email = models.EmailField()
received = models.DateTimeField()
status = models.CharField(max_length=1, choices=STATUS_CHOICES, db_index=True, default=UNPROCESSED)
original_message = models.TextField(help_text="The original raw text of the message.")
class ListMessage(ProcessedMessageModel):
"""
Represents the relationship between a :class:`Message` and a :class:`MailingList`. This is what is processed to handle the sending of a message to a list rather than the original message.
"""
ACCEPTED = 1
REQUIRES_MODERATION = 2
PREPPED = 3
SENT = 4
FAILED = 5
STATUS_CHOICES = (
(ACCEPTED, 'Accepted'),
(REQUIRES_MODERATION, 'Requires Moderation'),
(PREPPED, 'Prepped'),
(SENT, 'Sent'),
(FAILED, 'Failed'),
)
message = models.ForeignKey(Message)
mailing_list = models.ForeignKey(MailingList)
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, db_index=True)
class Meta:
unique_together = ('message', 'mailing_list',)
class ListCommand(models.Model):
#: The ListCommand has not been processed.
UNPROCESSED = 1
#: The ListCommand has been rejected (e.g. for permissioning reasons.)
REJECTED = 2
#: This ListCommand has been processed completely.
PROCESSED = 3
#: An error occurred while processing the ListCommand.
FAILED = 4
STATUS_CHOICES = (
(UNPROCESSED, 'Unprocessed'),
(REJECTED, 'Rejected'),
(PROCESSED, 'Processed'),
(FAILED, 'Failed'),
)
message = models.ForeignKey(Message)
mailing_list = models.ForeignKey(MailingList)
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, db_index=True, default=UNPROCESSED)
command = models.CharField(max_length=20) |
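# A small sketch of how the models above are meant to compose when a new
# message arrives (sender_email, recipient_addr and message are
# hypothetical; the real dispatch code lives elsewhere in kiki).
# can_post() falls through three checks: open lists accept anyone,
# subscriber lists accept subscribers, and moderators may always post.
mailing_list = MailingList.objects.for_addresses([recipient_addr]).get()
if mailing_list.can_post(sender_email):
    status = ListMessage.ACCEPTED
elif mailing_list.moderation_enabled:
    status = ListMessage.REQUIRES_MODERATION
else:
    status = ListMessage.FAILED
ListMessage.objects.create(message=message, mailing_list=mailing_list,
                           status=status)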
chippey/gaffer | python/GafferAppleseedUI/AppleseedShaderBallUI.py | Python | bsd-3-clause | 2,787 | 0.013994 | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferAppleseed
Gaffer.Metadata.registerNode(
GafferAppleseed.AppleseedShaderBall,
"description",
"""
Generates scenes suitable for | rendering shader balls with Appleseed.
""",
plugs = {
"environment" : [
"description",
"""
An environment map used for lighting. Should be in latlong
format.
""",
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"pathPlugValueWidget:leaf", True,
"pathPlugValueWidget:valid", True,
"pathPlugValueWidget:bookmarks", "texture",
],
"maxSamples" : [
"description",
"""
The maximum number of samples used by appleseed to render the
shader ball | . A value of 0 disables the limit.
"""
],
"threads" : [
"description",
"""
The number of threads used by appleseed to render the
shader ball. A value of 0 uses all cores, and negative
values reserve cores for other uses - to be used by
the rest of the UI for instance.
"""
],
}
)
|
malkavi/lutris | lutris/runners/mupen64plus.py | Python | gpl-3.0 | 1,602 | 0 | # -*- coding: utf-8 -*-
import os
from lutris import settings
from lutris.runners.ru | nner | import Runner
class mupen64plus(Runner):
"""Nintendo 64 emulator"""
human_name = "Mupen64Plus"
platform = "Nintendo 64"
game_options = [{
'option': 'main_file',
'type': 'file',
'label': 'ROM file',
'help': ("The game data, commonly called a ROM image.")
}]
runner_options = [
{
'option': 'fullscreen',
'type': 'bool',
'label': 'Fullscreen',
'default': True
},
{
'option': 'nogui',
'type': 'bool',
'label': 'Hide GUI',
'default': True
}
]
tarballs = {
'i386': 'mupen64plus-bundle-linux32-2.0.tar.gz',
'x64': 'mupen64plus-bundle-linux64-2.0-ubuntu.tar.gz',
}
@property
def working_dir(self):
return os.path.join(settings.RUNNER_DIR, 'mupen64plus')
def get_executable(self):
return os.path.join(settings.RUNNER_DIR, 'mupen64plus/mupen64plus')
def play(self):
arguments = [self.get_executable()]
if self.runner_config.get('nogui'):
arguments.append('--nogui')
if self.runner_config.get('fullscreen'):
arguments.append('--fullscreen')
else:
arguments.append('--windowed')
rom = self.game_config.get('main_file') or ''
if not os.path.exists(rom):
return {'error': 'FILE_NOT_FOUND', 'file': rom}
arguments.append("\"%s\"" % rom)
return {'command': arguments}
|
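# Shape of the value play() hands back to the Lutris launcher (paths are
# illustrative; the runner itself never spawns the process):
#   {'command': ['<runner_dir>/mupen64plus/mupen64plus',
#                '--nogui', '--fullscreen', '"<rom path>"']}
# or, when the ROM file is missing:
#   {'error': 'FILE_NOT_FOUND', 'file': '<rom path>'}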
fake-name/ReadableWebProxy | runScheduler.py | Python | bsd-3-clause | 5,913 | 0.033824 | #!flask/bin/python
import sys
import os.path
import logging
os.environ['NDSCHEDULER_SETTINGS_MODULE'] = 'settings_sched'
addpath = os.path.abspath("./ndscheduler")
if addpath not in sys.path:
sys.path.append(os.path.abspath("./ndscheduler"))
import traceback
import datetime
import threading
import time
import apscheduler.events
import apscheduler.triggers.interval
import apscheduler.triggers.cron
# Shut up fucking annoying psycopg2 vomit every exec.
import warnings
from sqlalchemy import exc as sa_exc
warnings.filterwarnings("ignore", category=UserWarning, module='psycopg2')
warnings.simplefilter("ignore", category=sa_exc.SAWarning)
import ndscheduler
import ndscheduler.server.server
import common.stuck
import activeScheduledTasks
JOB_MAP = {
apscheduler.events.EVENT_SCHEDULER_STARTED : "EVENT_SCHEDULER_STARTED",
apscheduler.events.EVENT_SCHEDULER_SHUTDOWN : "EVENT_SCHEDULER_SHUTDOWN",
apscheduler.events.EVENT_SCHEDULER_PAUSED : "EVENT_SCHEDULER_PAUSED",
apscheduler.events.EVENT_SCHEDULER_RESUMED : "EVENT_SCHEDULER_RESUMED",
apscheduler.events.EVENT_EXECUTOR_ADDED : "EVENT_EXECUTOR_ADDED",
apscheduler.events.EVENT_EXECUTOR_REMOVED : "EVENT_EXECUTOR_REMOVED",
apscheduler.events.EVENT_JOBSTORE_ADDED : "EVENT_JOBSTORE_ADDED",
apscheduler.events.EVENT_JOBSTORE_REMOVED : "EVENT_JOBSTORE_REMOVED",
apscheduler.events.EVENT_ALL_JOBS_REMOVED : "EVENT_ALL_JOBS_REMOVED",
apscheduler.events.EVENT_JOB_ADDED : "EVENT_JOB_ADDED",
apscheduler.events.EVENT_JOB_REMOVED : "EVENT_JOB_REMOVED",
apscheduler.events.EVENT_JOB_MODIFIED : "EVENT_JOB_MODIFIED",
apscheduler.events.EVENT_JOB_SUBMITTED : "EVENT_JOB_SUBMITTED",
apscheduler.events.EVENT_JOB_MAX_INSTANCES : "EVENT_JOB_MAX_INSTANCES",
apscheduler.events.EVENT_JOB_EXECUTED : "EVENT_JOB_EXECUTED",
apscheduler.events.EVENT_JOB_ERROR : "EVENT_JOB_ERROR",
apscheduler.events.EVENT_JOB_MISSED : "EVENT_JOB_MISSED",
apscheduler.events.EVENT_ALL : "EVENT_ALL",
}
log = logging.getLogger("Main.Runtime")
def job_evt_listener(event):
if hasattr(event, "exception") and event.exception:
log.info('Job crashed: %s', event.job_id)
log.info('Traceback: %s', event.traceback)
else:
log.info('Job event code: %s, job: %s', JOB_MAP[event.code], event.job_id)
if event.code == apscheduler.events.EVENT_JOB_MAX_INSTANCES:
log.info('Job event code: %s, job: %s', JOB_MAP[event.code], event.job_id)
log.error("Missed job execution! Killing job executor to unstick jobs")
print('Job event code: %s, job: %s' % (JOB_MAP[event.code], event.job_id))
print("Missed job execution! Killing job executor to unstick jobs")
import ctypes
ctypes.string_at(1)
import os
os.kill(0,4)
class SimpleServer(ndscheduler.server.server.SchedulerServer):
def post_scheduler_start(self):
active_jobs = set()
current_jobs = self.scheduler_manager.get_jobs()
start_date = datetime.datetime.now()
for job in current_jobs:
job_str, job_id = job.args[:2]
active_jobs.add(job_str)
# We only actively manage jobs that start with "AUTO". That lets us
# have manually added jobs that exist outside of the management interface.
if not job.name.startswith("AUTO: "):
continue
if job_str not in activeScheduledTasks.target_jobs:
print("Removing job: %s -> %s" % (job_str, job_id))
self.scheduler_manager.remove_job(job_id)
else:
job_params = activeScheduledTasks.target_jobs[job_str]
if job_params.get('interval'):
trig = apscheduler.triggers.interval.IntervalTrigger(
seconds = job_params.get('interval'),
start_date = start_date,
)
start_date = start_date + datetime.timedelta(minutes=5)
else:
trig = apscheduler.triggers.cron.CronTrigger(
month = job_params.get('month', None),
day = job_params.get('day', None),
day_of_week = job_params.get('day_of_week', None),
hour = job_params.get('hour', None),
minute = job_params.get('minute', None),
)
if job.name != job_params['name']:
self.scheduler_manager.remove_job(job_id)
# So the apscheduler CronTrigger class doesn't provide the equality
# operator, so we compare the stringified version. Gah.
elif str(job.trigger) != str(trig):
print("Removing due to trigger mismatch:", str(job.trigger), str(trig))
print("Job name: ", job.name, job)
self.scheduler_manager.remove_job(job_id)
start_date = datetime.datetime.now()
current_jobs = self.scheduler_manager.get_jobs()
for job_name, params in activeScheduledTasks.target_jobs.items():
if job_name not in active_jobs:
assert params['name'].startswith("AUTO: ")
print("Adding job: %s" % job_name)
if params.get('interval'):
trig = apscheduler.triggers.interval.IntervalTrigger(
seconds = params.get('interval'),
start_date = start_date,
)
start_date = start_date + datetime.timedelta(minutes=5)
else:
trig = apscheduler.triggers.cron.CronTrigger(
month = params.get('month', None),
day = params.get('day', None),
day_of_week = params.get('day_of_week', None),
hour = params.get('hour', None), |
minute = params.get('minute', None),
)
self.scheduler_manager.add_trigger_job(
job_class_string = job_name,
name = params['name'],
| trigger = trig,
)
self.scheduler_manager.sched.add_listener(job_evt_listener,
apscheduler.events.EVENT_JOB_EXECUTED |
apscheduler.events.EVENT_JOB_ERROR |
apscheduler.events.EVENT_JOB_MISSED |
apscheduler.events.EVENT_JOB_MAX_INSTANCES
)
def run_scheduler():
# common.stuck.install_pystuck()
SimpleServer.run()
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
run_scheduler()
|
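# A minimal demonstration of the trigger-comparison workaround used in
# post_scheduler_start() above: in the apscheduler release this project
# targets, CronTrigger does not implement __eq__, so two identically
# configured triggers compare unequal as objects while their string forms
# match.
import apscheduler.triggers.cron

a = apscheduler.triggers.cron.CronTrigger(hour=3, minute=0)
b = apscheduler.triggers.cron.CronTrigger(hour=3, minute=0)
print(a == b)            # False -- falls back to identity comparison
print(str(a) == str(b))  # True  -- stringified forms are comparable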
chennan47/osf.io | addons/wiki/views.py | Python | apache-2.0 | 19,789 | 0.001213 | # -*- coding: utf-8 -*-
import httplib as http
import logging
from flask import request
from framework.exceptions import HTTPError
from framework.auth.utils import privacy_info_handle
from framework.auth.decorators import must_be_logged_in
from framework.flask import redirect
from addons.wiki.utils import to_mongo_key
from addons.wiki import settings
from addons.wiki import utils as wiki_utils
from website.profile.utils import get_profile_image_url
from website.project.views.node import _view_project
from website.project.model import has_anonymous_link
from website.ember_osf_web.decorators import ember_flag_is_active
from website.project.decorators import (
must_be_contributor_or_public,
must_have_addon, must_not_be_registration,
must_be_valid_project,
must_have_permission,
must_have_write_permission_or_public_wiki,
must_not_be_retracted_registration,
)
from website.exceptions import NodeStateError
from osf.exceptions import ValidationError
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
InvalidVersionError,
)
from .models import NodeWikiPage
logger = logging.getLogger(__name__)
WIKI_NAME_EMPTY_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be empty.'
))
WIKI_NAME_MAXIMUM_LENGTH_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be more than 100 characters.'
))
WIKI_PAGE_CANNOT_RENAME_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page cannot be renamed.'
))
WIKI_PAGE_CONFLICT_ERROR = HTTPError(http.CONFLICT, data=dict(
message_short='Page conflict',
message_long='A wiki page with that name already exists.'
))
WIKI_PAGE_NOT_FOUND_ERROR = HTTPError(http.NOT_FOUND, data=dict(
message_short='Not found',
message_long='A wiki page could not be found.'
))
WIKI_INVALID_VERSION_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The requested version of this wiki page does not exist.'
))
def _get_wiki_versions(node, name, anonymous=False):
key = to_mongo_key(name)
# Skip if wiki_page doesn't exist; happens on new projects before
# default "home" page is created
if key not in node.wiki_pages_versions:
return []
versions = [
NodeWikiPage.load(version_wiki_id)
for version_wiki_id in node.wiki_pages_versions[key]
]
return [
| {
'version': version.version,
'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
'date': '{} UTC'.format(v | ersion.date.replace(microsecond=0).isoformat().replace('T', ' ')),
}
for version in reversed(versions)
]
def _get_wiki_pages_current(node):
return [
{
'name': sorted_page.page_name,
'url': node.web_url_for('project_wiki_view', wname=sorted_page.page_name, _guid=True),
'wiki_id': sorted_page._primary_key,
'wiki_content': _wiki_page_content(sorted_page.page_name, node=node)
}
for sorted_page in [
node.get_wiki_page(sorted_key)
for sorted_key in sorted(node.wiki_pages_current)
]
# TODO: remove after forward slash migration
if sorted_page is not None
]
def _get_wiki_api_urls(node, name, additional_urls=None):
urls = {
'base': node.api_url_for('project_wiki_home'),
'delete': node.api_url_for('project_wiki_delete', wname=name),
'rename': node.api_url_for('project_wiki_rename', wname=name),
'content': node.api_url_for('wiki_page_content', wname=name),
'settings': node.api_url_for('edit_wiki_settings'),
'grid': node.api_url_for('project_wiki_grid_data', wname=name)
}
if additional_urls:
urls.update(additional_urls)
return urls
def _get_wiki_web_urls(node, key, version=1, additional_urls=None):
urls = {
'base': node.web_url_for('project_wiki_home', _guid=True),
'edit': node.web_url_for('project_wiki_view', wname=key, _guid=True),
'home': node.web_url_for('project_wiki_home', _guid=True),
'page': node.web_url_for('project_wiki_view', wname=key, _guid=True),
}
if additional_urls:
urls.update(additional_urls)
return urls
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_have_addon('wiki', 'node')
def wiki_page_draft(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname)
return {
'wiki_content': wiki_page.content if wiki_page else None,
'wiki_draft': (wiki_page.get_draft(node) if wiki_page
else wiki_utils.get_sharejs_content(node, wname)),
}
def _wiki_page_content(wname, wver=None, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname, version=wver)
rendered_before_update = wiki_page.rendered_before_update if wiki_page else False
return {
'wiki_content': wiki_page.content if wiki_page else '',
'rendered_before_update': rendered_before_update
}
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_page_content(wname, wver=None, **kwargs):
return _wiki_page_content(wname, wver=wver, **kwargs)
@must_be_valid_project # injects project
@must_have_permission('write') # injects user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_delete(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = node.get_wiki_page(wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
if not wiki_page:
raise HTTPError(http.NOT_FOUND)
node.delete_node_wiki(wiki_name, auth)
wiki_utils.broadcast_to_sharejs('delete', sharejs_uuid, node)
return {}
@must_be_valid_project # returns project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
@must_not_be_retracted_registration
def project_wiki_view(auth, wname, path=None, **kwargs):
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
wiki_name = (wname or '').strip()
wiki_key = to_mongo_key(wiki_name)
wiki_page = node.get_wiki_page(wiki_name)
wiki_settings = node.get_addon('wiki')
can_edit = (
auth.logged_in and not
node.is_registration and (
node.has_permission(auth.user, 'write') or
wiki_settings.is_publicly_editable
)
)
versions = _get_wiki_versions(node, wiki_name, anonymous=anonymous)
# Determine panels used in view
panels = {'view', 'edit', 'compare', 'menu'}
if request.args and set(request.args).intersection(panels):
panels_used = [panel for panel in request.args if panel in panels]
num_columns = len(set(panels_used).intersection({'view', 'edit', 'compare'}))
if num_columns == 0:
panels_used.append('view')
num_columns = 1
else:
panels_used = ['view', 'menu']
num_columns = 1
try:
view = wiki_utils.format_wiki_version(
version=request.args.get('view'),
num_versions=len(versions),
allow_preview=True,
)
compare = wiki_utils.format_wiki_version(
version=request.args.get('compare'),
num_versions=len(versions),
allow_preview=False,
)
except InvalidVersionError:
raise WIKI_INVALID_VERSION_ERROR
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_page:
version = wiki_page.version
is_current = wiki_page.is_current
content = wiki_page.html(node)
rendered_before_update = wiki_page.rendered_before_update
else:
version = 'NA'
|
afraser/CellProfiler-Analyst | cpa/sortbin.py | Python | gpl-2.0 | 12,471 | 0.006976 | from dbconnect import DBConnect
import tilecollection
from imagetile import ImageTile
from imagetilesizer import ImageTileSizer
from imagecontrolpanel import ImageControlPanel
from properties import Properties
import imagetools
import cPickle
import wx
p = Properties.getInstance()
db = DBConnect.getInstance()
# The event type is shared, and there is no information in the event
# about which SortBin it came from. That's ok because the handler
# will need to check all the SortBins anyway.
EVT_QUANTITY_CHANGED = wx.PyEventBinder(wx.NewEventType(), 1)
class CellMontageFrame(wx.Frame):
'''A frame that allows you to add a bunch of object tiles
'''
def __init__(self, parent, **kwargs):
wx.Frame.__init__(self, parent, **kwargs)
self.sb = SortBin(self)
| self.cp = wx.CollapsiblePane(self, label='Show controls', style=wx.CP_DEFAULT_STYLE|wx.CP_NO_TLW_RESIZE)
self.icp = ImageC | ontrolPanel(self.cp.GetPane(), self)
self.Sizer = wx.BoxSizer(wx.VERTICAL)
self.Sizer.Add(self.sb, 1, wx.EXPAND)
self.Sizer.Add(self.cp, 0, wx.EXPAND)
self.cp.Bind(wx.EVT_COLLAPSIBLEPANE_CHANGED, self._on_control_pane_change)
def _on_control_pane_change(self, evt=None):
self.Layout()
if self.cp.IsExpanded():
self.cp.SetLabel('Hide controls')
else:
self.cp.SetLabel('Show controls')
def add_objects(self, obkeys):
self.sb.AddObjects(obkeys)
#
# required by ImageControlPanel
#
def SetBrightness(self, brightness):
[t.SetBrightness(brightness) for t in self.sb.tiles]
def SetScale(self, scale):
[t.SetScale(scale) for t in self.sb.tiles]
self.sb.UpdateSizer()
def SetContrastMode(self, mode):
[t.SetContrastMode(mode) for t in self.sb.tiles]
class SortBinDropTarget(wx.DropTarget):
def __init__(self, bin):
wx.DropTarget.__init__(self)
self.data = wx.CustomDataObject("ObjectKey")
self.SetDataObject(self.data)
self.bin = bin
def OnData(self, x, y, dragres):
if not self.GetData():
return wx.DragNone
draginfo = self.data.GetData()
srcID, obKeys = cPickle.loads(draginfo)
if not obKeys:
return wx.DragNone
return self.bin.ReceiveDrop(srcID, obKeys)
class SortBin(wx.ScrolledWindow):
'''
SortBins contain collections of objects as small image tiles
that can be dragged to other SortBins for classification.
'''
def __init__(self, parent, chMap=None, label='', classifier=None, parentSizer=None):
wx.ScrolledWindow.__init__(self, parent)
self.SetDropTarget(SortBinDropTarget(self))
self.label = label
self.parentSizer = parentSizer
self.tiles = []
self.classifier = classifier
self.trained = False
self.empty = True
self.tile_collection = None # tile collection
if chMap:
self.chMap = chMap
else:
self.chMap = p.image_channel_colors
self.SetBackgroundColour('#000000')
self.sizer = ImageTileSizer()
self.SetSizer(self.sizer)
self.SetMinSize((50, 50))
(w,h) = self.sizer.GetSize()
self.SetScrollbars(20,20,w/20,h/20,0,0)
self.EnableScrolling(x_scrolling=False, y_scrolling=True)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_KEY_DOWN, self.OnKey)
# stop focus events from propagating to the evil
# wx.ScrollWindow class which otherwise causes scroll jumping.
self.Bind(wx.EVT_SET_FOCUS, lambda evt: None)
tilecollection.EVT_TILE_UPDATED(self, self.OnTileUpdated)
self.CreatePopupMenu()
def __str__(self):
return 'Bin %s with %d objects'%(self.label, len(self.sizer.GetChildren()))
def CreatePopupMenu(self):
popupMenuItems = ['View full images of selected',
'Select all\tCtrl+A',
'Deselect all\tCtrl+D',
'Invert selection\tCtrl+I',
'Remove selected\tDelete']
if self.label != 'unclassified' and self.classifier is not None:
popupMenuItems += ['Rename class', 'Delete bin']
self.popupItemIndexById = {}
self.popupMenu = wx.Menu()
for i, item in enumerate(popupMenuItems):
id = wx.NewId()
self.popupItemIndexById[id] = i
self.popupMenu.Append(id,item)
self.popupMenu.Bind(wx.EVT_MENU,self.OnSelectFromPopupMenu)
def OnKey(self, evt):
''' Keyboard shortcuts '''
if evt.GetKeyCode() in [wx.WXK_DELETE, wx.WXK_BACK]: # delete
self.RemoveSelectedTiles()
elif evt.ControlDown() or evt.CmdDown():
if evt.GetKeyCode() == ord('A'):
self.SelectAll()
elif evt.GetKeyCode() == ord('D'):
self.DeselectAll()
elif evt.GetKeyCode() == ord('I'):
self.InvertSelection()
evt.Skip()
def OnRightDown(self, evt):
''' On right click show popup menu. '''
self.PopupMenu(self.popupMenu, evt.GetPosition())
def OnSelectFromPopupMenu(self, evt):
''' Handles selections from the popup menu. '''
choice = self.popupItemIndexById[evt.GetId()]
if choice == 0:
for key in self.SelectedKeys():
if self.classifier:
imViewer = imagetools.ShowImage(key[:-1], self.chMap[:],
parent=self.classifier,
brightness=self.classifier.brightness,
scale=self.classifier.scale,
contrast=self.classifier.contrast)
else:
imViewer = imagetools.ShowImage(key[:-1], self.chMap[:], parent=self)
imViewer.imagePanel.SelectPoint(db.GetObjectCoords(key))
elif choice == 1:
self.SelectAll()
elif choice == 2:
self.DeselectAll()
elif choice == 3:
self.InvertSelection()
elif choice == 4:
self.RemoveSelectedTiles()
elif choice == 5:
self.classifier.RenameClass(self.label)
elif choice == 6:
self.classifier.RemoveSortClass(self.label)
def AddObject(self, obKey, chMap=None, priority=1, pos='first'):
self.AddObjects([obKey], chMap, priority, pos)
def AddObjects(self, obKeys, chMap=None, priority=1, pos='first'):
if chMap is None:
chMap = p.image_channel_colors
if self.tile_collection == None:
self.tile_collection = tilecollection.TileCollection.getInstance()
imgSet = self.tile_collection.GetTiles(obKeys, (self.classifier or self), priority)
for i, obKey, imgs in zip(range(len(obKeys)), obKeys, imgSet):
if self.classifier:
newTile = ImageTile(self, obKey, imgs, chMap, False,
scale=self.classifier.scale,
brightness=self.classifier.brightness,
contrast=self.classifier.contrast)
else:
newTile = ImageTile(self, obKey, imgs, chMap, False)
if pos == 'first':
self.tiles.insert(i, newTile)
self.sizer.Insert(i, newTile, 0, wx.ALL|wx.EXPAND, 1 )
else:
self.tiles.append(newTile)
self.sizer.Add(newTile, 0, wx.ALL|wx.EXPAND, 1)
self.UpdateSizer()
self.UpdateQuantity()
def RemoveKey(self, obKey):
''' Removes the specified tile. '''
self.RemoveKeys([obKey])
def RemoveKeys(self, obKeys):
''' Removes the specified tile. '''
for t in self.t |
amraboelela/swift | utils/gyb_syntax_support/NodeSerializationCodes.py | Python | apache-2.0 | 7,221 | 0 | from Node import error
SYNTAX_NODE_SERIALIZATION_CODES = {
# 0 is 'Token'. Needs to be defined manually
# 1 is 'Unknown'. Needs to be defined manually
'UnknownDecl': 2,
'TypealiasDecl': 3,
'AssociatedtypeDecl': 4,
'IfConfigDecl': 5,
'PoundErrorDecl': 6,
'PoundWarningDecl': 7,
'PoundSourceLocation': 8,
'ClassDecl': 9,
'StructDecl': 10,
'ProtocolDecl': 11,
'ExtensionDecl': 12,
'FunctionDecl': 13,
'InitializerDecl': 14,
'DeinitializerDecl': 15,
'SubscriptDecl': 16,
'ImportDecl': 17,
'AccessorDecl': 18,
'VariableDecl': 19,
'EnumCaseDecl': 20,
'EnumDecl': 21,
'OperatorDecl': 22,
'PrecedenceGroupDecl': 23,
'UnknownExpr': 24,
'InOutExpr': 25,
'PoundColumnExpr': 26,
'TryExpr': 27,
'IdentifierExpr': 28,
'SuperRefExpr': 29,
'NilLiteralExpr': 30,
'DiscardAssignmentExpr': 31,
'AssignmentExpr': 32,
'SequenceExpr': 33,
'PoundLineExpr': 34,
'PoundFileExpr': 35,
'PoundFunctionExpr': 36,
'PoundDsohandleExpr': 37,
'SymbolicReferenceExpr': 38,
'PrefixOperatorExpr': 39,
'BinaryOperatorExpr': 40,
'ArrowExpr': 41,
'FloatLiteralExpr': 42,
'TupleExpr': 43,
'ArrayExpr': 44,
'DictionaryExpr': 45,
'ImplicitMemberExpr': 46,
'IntegerLiteralExpr': 47,
'StringLiteralExpr': 48,
'BooleanLiteralExpr': 49,
'TernaryExpr': 50,
'MemberAccessExpr': 51,
'DotSelfExpr': 52,
'IsExpr': 53,
'AsExpr': 54,
'TypeExpr': 55,
'ClosureExpr': 56,
'UnresolvedPatternExpr': 57,
'FunctionCallExpr': 58,
'SubscriptExpr': 59,
'OptionalChainingExpr': 60,
'ForcedValueExpr': 61,
'PostfixUnaryExpr': 62,
'SpecializeExpr': 63,
'StringInterpolationExpr': 64,
'KeyPathExpr': 65,
'KeyPathBaseExpr': 66,
'ObjcKeyPathExpr': 67,
'ObjcSelectorExpr': 68,
'EditorPlaceholderExpr': 69,
'ObjectLiteralExpr': 70,
'UnknownStmt': 71,
'ContinueStmt': 72,
'WhileStmt': 73,
'DeferStmt': 74,
'ExpressionStmt': 75,
'RepeatWhileStmt': 76,
'GuardStmt': 77,
'ForInStmt': 78,
'SwitchStmt': 79,
'DoStmt': 80,
'ReturnStmt': 81,
'FallthroughStmt': 82,
'BreakStmt': 83,
'DeclarationStmt': 84,
'ThrowStmt': 85,
'IfStmt': 86,
'Decl': 87,
'Expr': 88,
'Stmt': 89,
'Type': 90,
'Pattern': 91,
'CodeBlockItem': 92,
'CodeBlock': 93,
'DeclNameArgument': 94,
'DeclNameArguments': 95,
'FunctionCallArgument': 96,
'TupleElement': 97,
'ArrayElement': 98,
'DictionaryElement': 99,
'ClosureCaptureItem': 100,
'ClosureCaptureSignature': 101,
'ClosureParam': 102,
'ClosureSignature': 103,
'StringSegment': 104,
'ExpressionSegment': 105,
'ObjcNamePiece': 106,
'TypeInitializerClause': 107,
'ParameterClause': 108,
'ReturnClause': 109,
'FunctionSignature': 110,
'IfConfigClause': 111,
'PoundSourceLocationArgs': 112,
'DeclModifier': 113,
'InheritedType': 114,
'TypeInheritanceClause': 115,
'MemberDeclBlock': 116,
'MemberDeclListItem': 117,
'SourceFile': 118,
'InitializerClause': 119,
'FunctionParameter': 120,
'AccessLevelModifier': 121,
'AccessPathComponent': 122,
'AccessorParameter': 123,
'AccessorBlock': 124,
'PatternBinding': 125,
'EnumCaseElement': 126,
'OperatorPrecedenceAndTypes': 127,
'PrecedenceGroupRelation': 128,
'PrecedenceGroupNameElement': 129,
'PrecedenceGroupAssignment': 130,
'PrecedenceGroupAssociativity': 131,
'Attribute': 132,
'LabeledSpecializeEntry': 133,
'ImplementsAttributeArguments': 134,
'ObjCSelectorPiece': 135,
'WhereClause': 136,
'ConditionElement': 137,
'AvailabilityCondition': 138,
'MatchingPatternCondition': 139,
'OptionalBindingCondition': 140,
'ElseIfContinuation': 141,
'ElseBlock': 142,
'SwitchCase': 143,
'SwitchDefaultLabel': 144,
'CaseItem': 145,
'SwitchCaseLabel': 146,
'CatchClause': 147,
'GenericWhereClause': 148,
'SameTypeRequirement': 149,
'GenericParameter': 150,
'GenericParameterClause': 151,
'ConformanceRequirement': 152,
'CompositionTypeElement': 153,
'TupleTypeElement': 154,
'GenericArgument': 155,
'GenericArgumentClause': 156,
'TypeAnnotation': 157,
'TuplePatternElement': 158,
'AvailabilityArgument': 159,
'AvailabilityLabeledArgument': 160,
'AvailabilityVersionRestriction': 161,
'VersionTuple': 162,
'CodeBlockItemList': 163,
'FunctionCallArgumentList': 164,
'TupleElementList | ': 165,
' | ArrayElementList': 166,
'DictionaryElementList': 167,
'StringInterpolationSegments': 168,
'DeclNameArgumentList': 169,
'ExprList': 170,
'ClosureCaptureItemList': 171,
'ClosureParamList': 172,
'ObjcName': 173,
'FunctionParameterList': 174,
'IfConfigClauseList': 175,
'InheritedTypeList': 176,
'MemberDeclList': 177,
'ModifierList': 178,
'AccessPath': 179,
'AccessorList': 180,
'PatternBindingList': 181,
'EnumCaseElementList': 182,
'PrecedenceGroupAttributeList': 183,
'PrecedenceGroupNameList': 184,
'TokenList': 185,
'NonEmptyTokenList': 186,
'AttributeList': 187,
'SpecializeAttributeSpecList': 188,
'ObjCSelector': 189,
'SwitchCaseList': 190,
'CatchClauseList': 191,
'CaseItemList': 192,
'ConditionElementList': 193,
'GenericRequirementList': 194,
'GenericParameterList': 195,
'CompositionTypeElementList': 196,
'TupleTypeElementList': 197,
'GenericArgumentList': 198,
'TuplePatternElementList': 199,
'AvailabilitySpecList': 200,
'UnknownPattern': 201,
'EnumCasePattern': 202,
'IsTypePattern': 203,
'OptionalPattern': 204,
'IdentifierPattern': 205,
'AsTypePattern': 206,
'TuplePattern': 207,
'WildcardPattern': 208,
'ExpressionPattern': 209,
'ValueBindingPattern': 210,
'UnknownType': 211,
'SimpleTypeIdentifier': 212,
'MemberTypeIdentifier': 213,
'ClassRestrictionType': 214,
'ArrayType': 215,
'DictionaryType': 216,
'MetatypeType': 217,
'OptionalType': 218,
'ImplicitlyUnwrappedOptionalType': 219,
'CompositionType': 220,
'TupleType': 221,
'FunctionType': 222,
'AttributedType': 223,
'YieldStmt': 224,
'YieldList': 225,
'IdentifierList': 226,
'NamedAttributeStringArgument': 227,
'DeclName': 228,
'PoundAssertStmt': 229,
}
def verify_syntax_node_serialization_codes(nodes, serialization_codes):
# Verify that all nodes have serialization codes
for node in nodes:
if not node.is_base() and node.syntax_kind not in serialization_codes:
error('Node %s has no serialization code' % node.syntax_kind)
# Verify that no serialization code is used twice
used_codes = set()
for serialization_code in serialization_codes.values():
if serialization_code in used_codes:
error("Serialization code %d used twice" % serialization_code)
used_codes.add(serialization_code)
def get_serialization_code(syntax_kind):
return SYNTAX_NODE_SERIALIZATION_CODES[syntax_kind]
|
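# A hedged sketch of how the helpers above are driven by the gyb
# templates. _StubNode is hypothetical; the real class comes from the
# Node module in gyb_syntax_support.
class _StubNode(object):
    def __init__(self, syntax_kind, base=False):
        self.syntax_kind = syntax_kind
        self._base = base

    def is_base(self):
        return self._base

nodes = [_StubNode('Decl', base=True), _StubNode('ClassDecl')]
verify_syntax_node_serialization_codes(nodes, SYNTAX_NODE_SERIALIZATION_CODES)
print(get_serialization_code('ClassDecl'))  # 9, per the table above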
pokidovea/immobilus | tests/test_async.py | Python | apache-2.0 | 780 | 0 | import asyncio
from immobilus import immobilus |
from datetime import datetime
import pytest
@pytest.mark.asyncio
@immobilus('2000-02-01 13:23')
async def test_decorated_async_function():
assert datetime.utcnow() == datetime(2000, 2, 1, 13, 23)
@pytest.mark.asyncio
async def test_async_function_under_context():
dt = datetime(2016, 1, 1, 13, 54)
assert datetime.utcnow() != dt
with immobilus('2016-01-01 13:54'):
assert datetime.utcnow() == dt
| assert datetime.utcnow() != dt
@immobilus('2017-10-20')
async def some_coroutine():
return datetime.now()
def test_coroutine():
loop = asyncio.new_event_loop()
result = loop.run_until_complete(some_coroutine())
assert result.strftime('%Y-%m-%d %H:%M:%S') == '2017-10-20 00:00:00'
|
thepiper/standoff | vpy/lib/python2.7/site-packages/werkzeug/debug/tbtools.py | Python | gpl-3.0 | 16,913 | 0.000473 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types, \
to_native, to_unicode
_coding_re = re.compile(br'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(br'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = b'\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does not by
accident trigger a request to /favicon.ico which might change the application
state. -->
<link rel="shortcut icon" href="?__debugger__=yes&cmd=resource&f=console.png">
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
SECRET = "%(secret)s";
</script>
</head>
<body>
<div class="debugger">
'''
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
</body>
</html>
'''
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''
CONSOLE_HTML = HEADER + u | '''\
<h1>Interactive Console</h1>
<div class="explanat | ion">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''
SOURCE_TABLE_HTML = u'<table class=source>%s</table>'
SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret):
return CONSOLE_HTML % {
'evalex': 'true',
'console': 'true',
'title': 'Console',
'secret': secret,
'traceback_id': -1
}
def get_current_traceback(ignore_system_exceptions=False,
show_hidden_frames=False, skip=0):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
raise
for x in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
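# Typical use (sketch): call this inside an ``except`` block so that
# sys.exc_info() is populated, e.g.
#
#     try:
#         app_iter = app(environ, start_response)
#     except Exception:
#         tb = get_current_traceback(ignore_system_exceptions=True)
#         tb.log(environ['wsgi.errors'])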
class Line(object):
"""Helper for the source renderer."""
__slots__ = ('lineno', 'code', 'in_frame', 'current')
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
def classes(self):
rv = ['line']
if self.in_frame:
rv.append('in-frame')
if self.current:
rv.append('current')
return rv
classes = property(classes)
def render(self):
return SOURCE_LINE_HTML % {
'classes': u' '.join(self.classes),
'lineno': self.lineno,
'code': escape(self.code)
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
if not isinstance(exc_type, str):
exception_type = exc_type.__name__
if exc_type.__module__ not in ('__builtin__', 'exceptions'):
exception_type = exc_type.__module__ + '.' + exception_type
else:
exception_type = exc_type
self.exception_type = exception_type
# we only add frames to the list that are not hidden. This follows
# the magic variables as defined by paste.exceptions.collector
self.frames = []
while tb:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
if not self.frames:
return
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ('before', 'before_and_this'):
new_frames = []
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == 'codeop':
del self.frames[:]
# if the last frame is missing, something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
is_syntax_error = property(is_syntax_error)
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_ |
ebroder/anygit | anygit/config/middleware.py | Python | mit | 2,432 | 0.000411 | """Pylons middleware initialization"""
from beaker.middleware import CacheMiddleware, SessionMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.deploy.converters import asbool
from pylons import config
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes.middleware | import RoutesMiddleware
from anygit.config.environment import load_environment
def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
"""Create a Pylons WSGI application and return it
``g | lobal_conf``
The inherited configuration for this application. Normally from
the [DEFAULT] section of the Paste ini file.
``full_stack``
Whether this application provides a full WSGI stack (by default,
meaning it handles its own exceptions and errors). Disable
full_stack when this application is "managed" by another WSGI
middleware.
``static_files``
Whether this application serves its own static files; disable
when another web server is responsible for serving them.
``app_conf``
The application's local configuration. Normally specified in
the [app:<name>] section of the Paste ini file (where <name>
defaults to main).
"""
# Configure the Pylons environment
load_environment(global_conf, app_conf)
# The Pylons WSGI app
app = PylonsApp()
# Routing/Session/Cache Middleware
app = RoutesMiddleware(app, config['routes.map'])
app = SessionMiddleware(app, config)
app = CacheMiddleware(app, config)
# CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)
if asbool(full_stack):
# Handle Python exceptions
app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
# Display error documents for 401, 403, 404 status codes (and
# 500 when debug is disabled)
if asbool(config['debug']):
app = StatusCodeRedirect(app)
else:
app = StatusCodeRedirect(app, [400, 401, 403, 404, 500])
# Establish the Registry for this application
app = RegistryManager(app)
if asbool(static_files):
# Serve static files
static_app = StaticURLParser(config['pylons.paths']['static_files'])
app = Cascade([static_app, app])
return app
|
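# Resulting WSGI onion, outermost first (an illustration of the wiring
# above; the Cascade and StatusCodeRedirect layers are conditional on
# static_files and full_stack):
#
#   Cascade([StaticURLParser,
#            RegistryManager(
#                StatusCodeRedirect(
#                    ErrorHandler(
#                        CacheMiddleware(
#                            SessionMiddleware(
#                                RoutesMiddleware(PylonsApp))))))])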
sergeneren/anima | anima/ui/scripts/maya.py | Python | bsd-2-clause | 1,800 | 0.000556 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import logging
from anima import logger
from anima.env.mayaEnv import Maya
from anima.utils import do_db_setup
def set_qt_lib():
"""sets the Qt lib according to the maya version
"""
import pymel
try:
from anima import ui
if pymel.versions.current() > 201500:
ui.SET_PYSIDE2()
else:
ui.SET_PYSIDE()
except AttributeError:
pass
def version_creator(logging_level=logging.WARNING):
"""Helper function for version_creator UI for Maya
"""
# connect to db
do_db_setup()
# use PySide for Maya 2014
set_qt_lib()
from anima.ui import version_creator, models
from anima.env import mayaEnv
reload(version_creator)
reload(models)
reload(mayaEnv)
m = Maya()
import pymel
m.name = "Maya%s" % str(pymel.versions.current())[0:4]
logger.setLevel(logging_level)
version_creator.UI(environment=m)
def version_updater(logging_level=logging.WARNING):
"""helper function for version_ | updater UI for Maya
"""
# connect to db
do_db_setup()
# set Qt lib
set_qt_l | ib()
from anima.ui import version_updater, models
from anima.env import mayaEnv
reload(mayaEnv)
reload(version_updater)
reload(models)
m = Maya()
import pymel
m.name = "Maya" + str(pymel.versions.current())[0:4]
logger.setLevel(logging_level)
# generate a reference_resolution
version_updater.UI(environment=m)
def version_mover():
"""
"""
# connect to db
do_db_setup()
from anima.ui import version_mover as vm
vm.UI()
|
mlperf/training_results_v0.7 | Google/benchmarks/bert/implementations/bert-cloud-TF2.0-tpu-v3-32/tf2_common/modeling/model_training_utils.py | Python | apache-2.0 | 24,871 | 0.006594 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A light weight utilities to train NLP models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import REDACTED
from absl import logging
import tensorflow.compat.v2 as tf
from REDACTED.tf2_common.training import grad_utils
from REDACTED.tf2_common.utils.misc import distribution_utils
from REDACTED.tf2_common.utils.mlp_log import mlp_log
_SUMMARY_TXT = 'training_summary.txt'
_MIN_SUMMARY_STEPS = 10
def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
"""Saves model to with provided checkpoint prefix."""
if model_dir:
checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
saved_path = checkpoint.save(checkpoint_path)
logging.info('Saving model as TF checkpoint: %s', saved_path)
return
def _get_input_iterator(input_fn, strategy):
"""Returns distributed dataset iterator."""
# When training with TPU pods, datasets need to be cloned across
# workers. Since a Dataset instance cannot be cloned in eager mode, we instead
# pass callable that returns a dataset.
if not callable(input_fn):
raise ValueError('`input_fn` should be a closure that returns a dataset.')
iterator = iter(
strategy.experimental_distribute_datasets_from_function(input_fn))
return iterator
def _float_metric_value(metric):
"""Gets the value of a float-value keras metric."""
return metric.result().numpy().astype(float)
def steps_to_run(current_step, steps_per_epoch, steps_per_loop):
"""Calculates steps to run on device."""
if steps_per_loop <= 0:
raise ValueError('steps_per_loop should be positive integer.')
if steps_per_loop == 1:
return steps_per_loop
remainder_in_epoch = current_step % steps_per_epoch
if remainder_in_epoch != 0:
return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
else:
return steps_per_loop
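# Worked example of the clamping above: with steps_per_epoch=100 and
# steps_per_loop=30,
#   steps_to_run(0,  100, 30) -> 30  (epoch boundary, full loop length)
#   steps_to_run(90, 100, 30) -> 10  (clamped so a single loop never
#                                     crosses into the next epoch)
# which keeps per-epoch checkpointing and evaluation aligned with the
# host-side loop.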
def write_txt_summary(training_summary, summary_dir):
"""Writes a summary text file to record stats."""
summary_path = os.path.join(summary_dir, _SUMMARY_TXT)
with tf.io.gfile.GFile(summary_path, | 'wb') as f:
logging.info('Training Summary: \n%s', str(training_summary))
f.write(json.dumps(training_summary, indent=4))
def run_customized_training_loop(
# pylint: disable=invalid-name
_sentinel=None,
# pylint: enable=invalid-name
strategy=None,
model_fn=None,
loss_fn=None,
model_dir=None,
train_input_fn=None,
steps_per_epoch=None, |
steps_per_loop=1,
epochs=1,
eval_input_fn=None,
eval_steps=None,
steps_between_eval=None,
steps_before_eval_start=None,
stop_threshold=None,
metric_fn=None,
init_checkpoint=None,
custom_callbacks=None,
run_eagerly=False,
sub_model_export_name=None,
explicit_allreduce=False,
device_warmup=False,
synthetic_train_input_fn=None,
pre_allreduce_callbacks=None,
post_allreduce_callbacks=None,
allreduce_bytes_per_pack=0,
enable_checkpoint_and_summary=False,
num_accumulation_steps=1,
stop_steps=None):
"""Run BERT pretrain model training using low-level API.
Arguments:
_sentinel: Used to prevent positional parameters. Internal, do not use.
strategy: Distribution strategy on which to run low level training loop.
model_fn: Function that returns a tuple (model, sub_model). Caller of this
function should add optimizer to the `model` via calling
`model.compile()` API or manually setting `model.optimizer` attribute.
Second element of the returned tuple (sub_model) is an optional sub model
to be used for initial checkpoint -- if provided.
loss_fn: Function with signature func(labels, logits) and returns a loss
tensor.
model_dir: Model directory used during training for restoring/saving model
weights.
train_input_fn: Function that returns a tf.data.Dataset used for training.
steps_per_epoch: Number of steps to run per epoch. At the end of each
epoch, model checkpoint will be saved and evaluation will be conducted
if evaluation dataset is provided.
steps_per_loop: Number of steps per graph-mode loop. In order to reduce
communication in eager context, training logs are printed every
steps_per_loop.
epochs: Number of epochs to train.
eval_input_fn: Function that returns evaluation dataset. If none,
evaluation is skipped.
eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`
is not none.
steps_between_eval: Number of steps between evals
steps_before_eval_start: Number of steps to skip before starting eval
stop_threshold: Stop threshold for MLPerf once accuracy achieved
metric_fn: A metrics function that returns a Keras Metric object to record
evaluation result using evaluation dataset or with training dataset
after every epoch.
init_checkpoint: Optional checkpoint to load to `sub_model` returned by
`model_fn`.
custom_callbacks: A list of Keras Callbacks objects to run during
training. More specifically, the `on_batch_begin()` and `on_batch_end()`
methods are invoked during training.
run_eagerly: Whether to run model training in pure eager execution. This
should be disabled for TPUStrategy.
sub_model_export_name: If not None, will export `sub_model` returned by
`model_fn` into checkpoint files. The name of intermediate checkpoint
file is {sub_model_export_name}_step_{step}.ckpt and the last
checkpoint's name is {sub_model_export_name}.ckpt;
if None, `sub_model` will not be exported as checkpoint.
explicit_allreduce: Whether to explicitly perform gradient allreduce,
instead of relying on implicit allreduce in optimizer.apply_gradients().
default is False. For now, if training using FP16 mixed precision,
explicit allreduce will aggregate gradients in FP16 format. For TPU and
GPU training using FP32, explicit allreduce will aggregate gradients in
FP32 format.
device_warmup: Whether or not to enable device warmup. This
runs the training and eval loop on synthetic data to pre-compile XLA
and TF tracing before accessing data.
synthetic_train_input_fn: Function that returns synthetic training
dataset. This is used in device warmup.
pre_allreduce_callbacks: A list of callback functions that take gradient
and model variable pairs as input, manipulate them, and return new
gradient and model variable pairs. The callback functions will be
invoked in the list order and before gradients are allreduced.
Default is no callbacks. Only used when explicit_allreduce=True.
post_allreduce_callbacks: A list of callback functions that take
gradient and model variable pairs as input, manipulate them, and
return new gradient and model variable pairs. The callback
functions will be invoked in the list order and right before gradients
are applied to variables for updates. Default is no callbacks. Only used
when explicit_allreduce=True.
allreduce_bytes_per_pack: A non-negative integer. Breaks collective
operations into packs of certain size. If it's zero, all gradients are
in one pack.
enable_checkpoint_and_summary: Whether to save checkpoint and summary.
stop_steps: Th |
manojpandey/simple-blog | blog/views.py | Python | mit | 1,490 | 0.004027 | from django.shortcuts import render
from django.utils import timezone
from .models import Post
from django.shortcuts import render, get_object_or_404
from .forms import PostForm
from django.shortcuts import redirect
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_v | alid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
| else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form}) |
prometheanfire/cloud-init | cloudinit/sources/DataSourceAzure.py | Python | gpl-3.0 | 21,832 | 0 | # vi: ts=4 expandtab
#
# Copyright (C) 2013 Canonical Ltd.
#
# Author: Scott Moser <scott.moser@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import contextlib
import crypt
import fnmatch
from functools import partial
import os
import os.path
import time
from xml.dom import minidom
import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import sources
from cloudinit.sources.helpers.azure import get_metadata_from_fabric
from cloudinit import util
LOG = logging.getLogger(__name__)
DS_NAME = 'Azure'
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
AGENT_START = ['service', 'walinuxagent', 'start']
BOUNCE_COMMAND = [
'sh', '-xc',
"i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
]
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START,
'data_dir': "/var/lib/waagent",
'set_hostname': True,
'hostname_bounce': {
'interface': 'eth0',
'policy': True,
'command': BOUNCE_COMMAND,
'hostname_command': 'hostname',
},
'disk_aliases': {'ephemeral0': '/dev/sdb'},
}
BUILTIN_CLOUD_CONFIG = {
'disk_setup': {
'ephemeral0': {'table_type': 'gpt',
'layout': [100],
'overwrite': True},
},
'fs_setup': [{'filesystem': 'ext4',
'device': 'ephemeral0.1',
'replace_fs': 'ntfs'}],
}
DS_CFG_PATH = ['datasource', DS_NAME]
DEF_EPHEMERAL_LABEL = 'Temporary Storage'
# The redacted password fails to meet password complexity requirements
# so we can safely use this to mask/redact the password in the ovf-env.xml
DEF_PASSWD_REDACTION = 'REDACTED'
def get_hostname(hostname_command='hostname'):
return util.subp(hostname_command, capture=True)[0].strip()
def set_hostname(hostname, hostname_command='hostname'):
util.subp([hostname_command, hostname])
@contextlib.contextmanager
def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
"""
Set a temporary hostname, restoring the previous hostname on exit.
Will have the value of the previous hostname when used as a context
manager, or None if the hostname was not changed.
"""
policy = cfg['hostname_bounce']['policy']
previous_hostname = get_hostname(hostname_command)
if (not util.is_true(cfg.get('set_hostname')) or
util.is_false(policy) or
(previous_hostname == temp_hostname and policy != 'force')):
yield None
return
set_hostname(temp_hostname, hostname_command)
try:
yield previous_hostname
finally:
set_hostname(previous_hostname, hostname_command)
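# Hedged usage sketch (the cfg values are hypothetical): the context manager
# yields the previous hostname while the temporary one is in effect, and the
# finally block restores it even if the body raises.
#
#     cfg = {'set_hostname': True, 'hostname_bounce': {'policy': True}}
#     with temporary_hostname('azure-probe', cfg) as previous:
#         if previous is not None:
#             pass  # interact with the fabric under the temporary name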
class DataSourceAzureNet(sources.DataSource):
FALLBACK_LEASE = '/var/lib/dhcp/dhclient.eth0.leases'
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'azure')
self.cfg = {}
self.seed = None
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.paths.cfgs.get('dhclient_lease',
self.FALLBACK_LEASE)
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
def get_metadata_from_agent(self):
temp_hostname = self.metadata.get('local-hostname')
hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
with temporary_hostname(temp_hostname, self.ds_cfg,
hostname_command=hostname_command) \
as previous_hostname:
if (previous_hostname is not None and
util.is_true(self.ds_cfg.get('set_hostname'))):
cfg = self.ds_cfg['hostname_bounce']
try:
| perform_hostname_bounce(hostname=temp_hostname,
cfg=cfg,
prev_hostname=previous_hostname)
except Exception as e:
LOG.warn("Failed publishing | hostname: %s", e)
util.logexc(LOG, "handling set_hostname failed")
try:
invoke_agent(self.ds_cfg['agent_command'])
except util.ProcessExecutionError:
# claim the datasource even if the command failed
util.logexc(LOG, "agent command '%s' failed.",
self.ds_cfg['agent_command'])
ddir = self.ds_cfg['data_dir']
fp_files = []
key_value = None
for pk in self.cfg.get('_pubkeys', []):
if pk.get('value', None):
key_value = pk['value']
LOG.debug("ssh authentication: using value from fabric")
else:
bname = str(pk['fingerprint'] + ".crt")
fp_files += [os.path.join(ddir, bname)]
LOG.debug("ssh authentication: "
"using fingerprint from fabirc")
missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
func=wait_for_files,
args=(fp_files,))
if len(missing):
LOG.warn("Did not find files, but going on: %s", missing)
metadata = {}
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
return metadata
def get_data(self):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
ddir = self.ds_cfg['data_dir']
candidates = [self.seed_dir]
candidates.extend(list_possible_azure_ds_devs())
if ddir:
candidates.append(ddir)
found = None
for cdev in candidates:
try:
if cdev.startswith("/dev/"):
ret = util.mount_cb(cdev, load_azure_ds_dir)
else:
ret = load_azure_ds_dir(cdev)
except NonAzureDataSource:
continue
except BrokenAzureDataSource as exc:
raise exc
except util.MountFailedError:
LOG.warn("%s was not mountable", cdev)
continue
(md, self.userdata_raw, cfg, files) = ret
self.seed = cdev
self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
found = cdev
LOG.debug("found datasource in %s", cdev)
break
if not found:
return False
if found == ddir:
LOG.debug("using files cached in %s", ddir)
# azure / hyper-v provides random data here
seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
quiet=True, decode=False)
if seed:
self.metadata['random_seed'] = seed
# now update ds_cfg to reflect the contents passed in config
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
# walinux agent writes files world readable, but expects
# the directory to be protected.
write_files(ddir, files, dirmode=0o700)
|
byung-u/ProjectEuler | Problem_200_299/euler_211_artVark.py | Python | mit | 16,299 | 0.003927 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
################################################################################
#
# Project Euler -- Problem 211
#
################################################################################
#
# Divisor sum squares
#
################################################################################
from math import sqrt
from datetime import datetime
def gcd(big,lit):
"""
Compute the greatest common denominator between two integers
"""
while not lit == 0:
temp = lit
lit = big % lit
big = temp
return big
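# Example (Euclid's algorithm): gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> 6.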
class primez():
"""
Prime number generator
"""
def __init__(self,syze):
"""
Initialize a sieve to handle a prime number table of the size
specified.
"""
self.plist = []
self.limit = syze
self.sieve = syze * [True]
j = self.limit
for i in xrange(4,syze,2):
self.sieve[i] = False
for nprime in xrange(3,syze,2):
if self.sieve[nprime] == False:
continue
spt = nprime*nprime
if spt > syze:
break
for i in xrange(spt,syze,nprime):
self.sieve[i] = False
def getList(self):
"""
Return a list of prime numbers.
"""
self.plist = [2]
for i in xrange(3,self.limit,2):
if self.sieve[i] == True:
self.plist.append(i)
return self.plist
def amIprime(self,number,emsg=True):
"""
Return True if number is prime, False if it is not.
"""
if number < self.limit:
return self.sieve[number]
if len(self.plist) == 0:
tlist = self.getList()
for chkn in self.plist:
if chkn * chkn > number:
return True
if number % chkn == 0:
return False
return True
class problem211():
def __init__(self,upbound):
"""
self.limit is the sqrt of self.biglimit.
self.biglimit is 64,000,000 (as specified by the problem)
"""
self.biglimit = upbound
self.limit = int(sqrt(self.biglimit))
self.primo = primez(self.limit)
self.plist = self.primo.getList()
self.getpSols()
self.setLower()
self.polNums()
def doit(self):
"""
sigma2(n * m) = sigma2(n) * sigma2(m) where n and m are relatively
prime.
General plan here is to:
Calculate all solutions that have prime factors under sqrt(64m) then
calculate all solutions that have one prime factor between sqrt(64m)
and 64m. Any number with two or more prime factors in that range is
over 64m.
solutions is a dictionary of sigma2 sums for all values x^n less
than 8000 where x is a prime and n is a positive integer.
Note that since entries in solutions are used to calculate if
products involving them are square, factors of p^2 can be removed.
Once these numbers are generated, entries of the form [x Y] and [Y z}
are merged to form [x z] and the old entries are eliminated.
When all merging has been done, the last entries in solutions are
the values that we want, which should yield all solutions that
only use factors less than sqrt(64m). Some extra numbers can be
gotten by multiplying identities together.
As far as finding prime factors of solutions greater than sqrt(64m):
a prime number x has a divisor square sum of x^2 + 1. This
must match an existing (1-8000) level solution in order to be valid.
So x^2 + 1 = Dy^2 where D is the solution for a number in the 1-8000
range, and y is another integer. Note that this can be simplified
into a variation of Pell's Equation.
So solving Pell's equation for D numbers in the range (1-8000)
would yield those numbers with factors in the range 8000 to 64000000.
This runs in under a second.
"""
self.reduceSolutions()
keyz = self.solutions.keys()
keyz.sort()
prodarr = self.mergeIdent(keyz)
keyz += self.chkForMore(keyz,prodarr)
localans = self.solvePell()
setx = set(keyz+localans)
ansvec = list(setx)
answer = 1+sum(ansvec)
return answer
def reduceSolutions(self):
"""
First half of the program.
At this point, self.solutions contains factorizations of all primes
and their powers. Reduce these entr | ies by combining them iteratively,
creating new entries (that are not prime), and removing previously
combined entries. When done, self.solutions should contain empty
entries only for numbers that solve this problem.
"""
templist = []
wh | ile len(self.hist) > 0:
minval = self.limit
for j in self.hist:
if self.hist[j] == minval:
lvec = self.getSame(j,self.hist[j])
ok = True
for k in lvec:
if k in templist:
ok = False
break
if ok:
templist += lvec
if self.hist[j] < minval:
minval = self.hist[j]
templist = [] + self.getSame(j,self.hist[j])
for j in xrange(0,len(templist),minval):
mergev = templist[j:j+minval]
self.reducev(mergev)
self.polNums()
def mergeIdent(self,keyz):
"""
The previous routine does not account for identities that can be
multiplied together. This routine finds those identity products
consisting of two identity values and adds them to the solution list.
ChkForMore uses this data as a starting point to determine if there are
identity value combinations of 3 or more that can be multiplied
together.
"""
prodarr = []
stind = 1
tval = keyz[0] * keyz[stind]
while tval < self.biglimit:
stind += 1
tval = keyz[0] * keyz[stind]
for i1 in xrange(0,stind-1):
for i2 in xrange(i1+1,stind):
if gcd(keyz[i2],keyz[i1]) != 1:
continue
prod = keyz[i1]*keyz[i2]
if prod > self.biglimit:
continue
prodarr.append(prod)
prodarr.sort()
return prodarr
def chkForMore(self,keyz,prodarr):
"""
keyz is a set of identity solutions. Prodarr is a list of products
of 2 keyz values that form another valid number. Generate further
identities (if they exist) by multiplying keyz values with prodarr
values. and if any are generated, loop again using the newly generated
list as the next prodarr input. Prodarr values are stored in the
combtab list. When no further entries can be generated, return all
individual values in the lists in the combtab list as one list.
"""
combtab = [prodarr]
prevind = 0
while True:
newdata = self.multMerge(keyz,combtab[prevind])
if len(newdata) == 0:
break
prevind += 1
combtab.append(newdata)
retval = []
for pentry in combtab:
keyz += pentry
retval.sort()
return retval
def multMerge(self,keyz,resultz):
"""
Multiply the keyz values with resultz values, saving any valid
numbers generated in mData (the parameter returned).
"""
mData = []
for i in keyz:
if i * resultz[0] > self.biglimit:
return mData
for j in resultz:
nbnum = i * j
if nbnum > self.biglimit:
break
if gcd(j,i) == 1:
mData.append(nbnum)
return mData
def solvePell(self |
duyet-website/api.duyet.net | lib/limits/strategies.py | Python | mit | 5,640 | 0.001418 | """
rate limiting strategies
"""
from abc import ABCMeta, abstractmethod
import weakref
import six
import time
@six.add_metaclass(ABCMeta)
class RateLimiter(object):
def __init__(self, storage):
self.storage = weakref.ref(storage)
@abstractmethod
def hit(self, item, *identifiers):
"""
creates a hit on the rate limit and returns True if successful.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: True/False
"""
raise NotImplementedError
@abstractmethod
def test(self, item, *identifiers):
"""
checks the rate limit and returns True if it is not
currently exceeded.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: True/False
"""
raise NotImplementedError
@abstractmethod
def get_window_stats(self, item, *identifiers):
"""
returns the number of requests remaining and reset of this limit.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: tuple (reset time (int), remaining (int))
"""
raise NotImplementedError
class MovingWindowRateLimiter(RateLimiter):
"""
Reference: :ref:`moving-window`
"""
def __init__(self, storage):
if not (hasattr(storage, "acquire_entry") or hasattr(storage, "get_moving_window")):
raise NotImplementedError("MovingWindowRateLimiting is not implemented for storage of type %s" % storage.__class__)
super(MovingWindowRateLimiter, self).__init__(storage)
def hit(self, item, *identifiers):
"""
creates a hit on the rate limit and returns True if successful.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: True/False
"""
return self.storage().acquire_entry(
item.key_for(*identifiers), item.amount,
item.get_expiry()
)
def test(self, item, *identifiers):
"""
checks the rate limit and returns True if it is not
currently exceeded.
:param item: a :class:`RateLimitItem` instance
:param identifiers: va | riable list of strings to uniquely identify the
limit
:return: True/False
| """
return self.storage().get_moving_window(
item.key_for(*identifiers),
item.amount, item.get_expiry(),
)[1] < item.amount
def get_window_stats(self, item, *identifiers):
"""
returns the number of requests remaining and the reset time for this limit.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: tuple (reset time (int), remaining (int))
"""
window_start, window_items = self.storage().get_moving_window(
item.key_for(*identifiers), item.amount, item.get_expiry()
)
reset = window_start + item.get_expiry()
return (reset, item.amount - window_items)
class FixedWindowRateLimiter(RateLimiter):
"""
Reference: :ref:`fixed-window`
"""
def hit(self, item, *identifiers):
"""
creates a hit on the rate limit and returns True if successful.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: True/False
"""
return (
self.storage().incr(item.key_for(*identifiers), item.get_expiry())
<= item.amount
)
def test(self, item, *identifiers):
"""
checks the rate limit and returns True if it is not
currently exceeded.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: True/False
"""
return self.storage().get(item.key_for(*identifiers)) < item.amount
def get_window_stats(self, item, *identifiers):
"""
returns the number of requests remaining and reset of this limit.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: tuple (reset time (int), remaining (int))
"""
remaining = max(0, item.amount - self.storage().get(item.key_for(*identifiers)))
reset = self.storage().get_expiry(item.key_for(*identifiers))
return (reset, remaining)
class FixedWindowElasticExpiryRateLimiter(FixedWindowRateLimiter):
"""
Reference: :ref:`fixed-window-elastic`
"""
def hit(self, item, *identifiers):
"""
creates a hit on the rate limit and returns True if successful.
:param item: a :class:`RateLimitItem` instance
:param identifiers: variable list of strings to uniquely identify the
limit
:return: True/False
"""
return (
self.storage().incr(item.key_for(*identifiers), item.get_expiry(), True)
<= item.amount
)
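# Hedged usage sketch (not part of the original module; the storage and item
# classes are assumed to come from the same limits package, so treat these
# imports as assumptions):
#
#     from limits.storage import MemoryStorage
#     from limits import RateLimitItemPerMinute
#
#     limiter = FixedWindowRateLimiter(MemoryStorage())
#     ten_per_minute = RateLimitItemPerMinute(10)
#     allowed = limiter.hit(ten_per_minute, "client-42")  # False after 10 hits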
STRATEGIES = {
"fixed-window": FixedWindowRateLimiter,
"fixed-window-elastic-expiry": FixedWindowElasticExpiryRateLimiter,
"moving-window": MovingWindowRateLimiter
} |
ewandor/home-assistant | homeassistant/components/sensor/tellduslive.py | Python | apache-2.0 | 3,707 | 0 | """
Support for Tellstick Net/Tellstick Live.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.tellduslive/
"""
import logging
from homeassistant.components.tellduslive import TelldusLiveEntity
from homeassistant.const import TEMP_CELSIUS
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPE_TEMPERATURE = 'temp'
SENSOR_TYPE_HUMIDITY = 'humidity'
SENSOR_TYPE_RAINRATE = 'rrate'
SENSOR_TYPE_RAINTOTAL = 'rtot'
SENSOR_TYPE_WINDDIRECTION = 'wdir'
SENSOR_TYPE_WINDAVERAGE = 'wavg'
SENSOR_TYPE_WINDGUST = 'wgust'
SENSOR_TYPE_UV = 'uv'
SENSOR_TYPE_WATT = 'watt'
SENSOR_TYPE_LUMINANCE = 'lum'
SENSOR_TYPE_DEW_POINT = 'dewp'
SENSOR_TYPE_BAROMETRIC_PRESSURE = 'barpress'
SENSOR_TYPES = {
SENSOR_TYPE_TEMPERATURE: ['Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
SENSOR_TYPE_HUMIDITY: ['Humidity', '%', 'mdi:water'],
SENSOR_TYPE_RAINRATE: ['Rain rate', 'mm/h', 'mdi:water'],
SENSOR_TYPE_RAINTOTAL: ['Rain total', 'mm', 'mdi:water'],
SENSOR_TYPE_WINDDIRECTION: ['Wind direction', '', ''],
SENSOR_TYPE_WINDAVERAGE: ['Wind average', 'm/s', ''],
SENSOR_TYPE_WINDGUST: ['Wind gust', 'm/s', ''],
SENSOR_TYPE_UV: ['UV', 'UV', ''],
SENSOR_TYPE_WATT: ['Power', 'W', ''],
SENSOR_TYPE_LUMINANCE: ['Luminance', 'lx', ''],
SENSOR_TYPE_DEW_POINT: ['Dew Point', TEMP_CELSIUS, 'mdi:thermometer'],
SENSOR_TYPE_BAROMETRIC_PRESSURE: ['Barometric Pressure', 'kPa', ''],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Tellstick sensors."""
if discovery_info is None:
return
add_devices(TelldusLiveSensor(hass, sensor) for sensor in discovery_info)
class TelldusLiveSensor(TelldusLiveEntity):
"""Representation of a Telldus Live sensor."""
@property
def device_id(self):
"""Return id of the device."""
return self._id[0]
@property
def _type(self):
"""Return the type of the sensor."""
return self._id[1]
@property
def _value(self):
"""Return value of the sensor."""
return self.device.value(*self._id[1:])
@property
def _value_as_temperature(self):
"""Return the value as temperature."""
return round(float(self._value), 1)
@property
def _value_as_luminance(self):
"""Return the value as luminance."""
return round(float(self._value), 1)
@property
| def _value_as_humidity(self):
"""Return the value as humidity."""
return int(round(float(self._value)))
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(
super().name,
self.quantity_name or '')
@property
def state(self):
"""Return the state of the sensor."""
if not self | .available:
return None
elif self._type == SENSOR_TYPE_TEMPERATURE:
return self._value_as_temperature
elif self._type == SENSOR_TYPE_HUMIDITY:
return self._value_as_humidity
elif self._type == SENSOR_TYPE_LUMINANCE:
return self._value_as_luminance
return self._value
@property
def quantity_name(self):
"""Name of quantity."""
return SENSOR_TYPES[self._type][0] \
if self._type in SENSOR_TYPES else None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return SENSOR_TYPES[self._type][1] \
if self._type in SENSOR_TYPES else None
@property
def icon(self):
"""Return the icon."""
return SENSOR_TYPES[self._type][2] \
if self._type in SENSOR_TYPES else None
|
Orav/kbengine | kbe/src/lib/python/Tools/scripts/suff.py | Python | lgpl-3.0 | 536 | 0 | #! /usr/bin/env python3
# suff
#
# show different suffixes amongst arguments
import sys
def main():
files = sys.argv[1:]
suffixes = {}
for filename in files:
suff = getsuffix(filename)
suffixes.setdefault(suff, []).append(filename)
for suff, filenames in sorted(suffixes.items()):
print(repr(suff), len(fi | lenames))
def getsuffix(filename):
name, sep, suff = filename.rpartition('.')
return sep + suff if sep else '' |
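# e.g. getsuffix('archive.tar.gz') == '.gz'; getsuffix('README') == ''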
if __name__ == '__main__':
main()
|
csparpa/check | fluentcheck/assertions_is/geo.py | Python | mit | 520 | 0 | from fluentcheck.assertions_is.base_is import IsBase
class __IsGeo(IsBase):
@property
def latitude(self) -> "Is":
self.check.is_latitude()
return self
@property
def longitude(self) | -> "Is":
self.check.i | s_longitude()
return self
@property
def azimuth(self) -> "Is":
self.check.is_azimuth()
return self
# noinspection SpellCheckingInspection
@property
def geopoint(self) -> "Is":
self.check.is_geopoint()
return self
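# Hedged usage sketch (assumes the package composes this mixin into an `Is`
# entry point): checks chain fluently and raise on the first failure, e.g.
#
#     Is(45.0).latitude  # passes, since 45.0 is a valid latitude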
|
anhstudios/swganh | data/scripts/templates/object/tangible/wearables/armor/mandalorian/shared_armor_mandalorian_belt.py | Python | mit | 490 | 0.044898 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kern | el):
result = Tangible()
result.template = "object/tangible/wearables/armor/mandalorian/shared_armor_mandalorian_belt.iff"
result.attribute_template_id = 0
result.stfName("wearables_name","armor_mandalorian_belt")
#### BEGIN MODIFICATIONS ####
#### | END MODIFICATIONS ####
return result |
arunkgupta/gramps | gramps/gui/views/treemodels/citationbasemodel.py | Python | gpl-2.0 | 6,368 | 0.006281 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons, Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
CitationBaseModel classes for GRAMPS.
"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
import cgi
import logging
log = logging.getLogger(".")
LOG = logging.getLogger(".citation")
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.datehandler import format_time, get_date, get_date_valid
from gramps.gen.lib import Citation
from gramps.gen.utils.string import confidence
from gramps.gen.config import config
#-------------------------------------------------------------------------
#
# COLUMN constants
#
#-------------------------------------------------------------------------
# These are the column numbers in the serialize/unserialize interfaces in
# the Citation object
COLUMN_HANDLE = 0
COLUMN_ID = 1
CO | LUMN_DATE = 2
COLUMN_PAGE = 3
COLUMN_CONFIDENCE = 4
COLUMN_SOURCE = 5
COLUMN_CHANGE = 9
# Data for the Source object
COLUMN2_HANDLE = 0
COLUMN2_ID = 1
COLUMN2_TITLE = 2
COLUMN2_AUTHOR = 3
COLUMN2_PUBINFO = 4
COLUMN2_ABBREV = 7
COLUMN2_CHANGE = 8
INVALID_DATE_FORMAT = config.get('preferences.invalid-date-format')
#-------------------------------------------------------------------------
#
# CitationModel
#
#--------- | ----------------------------------------------------------------
class CitationBaseModel(object):
# Fields access when 'data' is a Citation
def citation_date(self, data):
if data[COLUMN_DATE]:
citation = Citation()
citation.unserialize(data)
date_str = get_date(citation)
if date_str != "":
retval = cgi.escape(date_str)
if not get_date_valid(citation):
return INVALID_DATE_FORMAT % retval
else:
return retval
return u''
def citation_sort_date(self, data):
if data[COLUMN_DATE]:
citation = Citation()
citation.unserialize(data)
retval = "%09d" % citation.get_date_object().get_sort_value()
if not get_date_valid(citation):
return INVALID_DATE_FORMAT % retval
else:
return retval
return u''
def citation_id(self, data):
return unicode(data[COLUMN_ID])
def citation_page(self, data):
return unicode(data[COLUMN_PAGE])
def citation_confidence(self, data):
return unicode(confidence[data[COLUMN_CONFIDENCE]])
def citation_handle(self, data):
return unicode(data[COLUMN_HANDLE])
def citation_change(self, data):
return format_time(data[COLUMN_CHANGE])
def citation_sort_change(self, data):
return "%012x" % data[COLUMN_CHANGE]
def citation_source(self, data):
return data[COLUMN_SOURCE]
def citation_src_title(self, data):
source_handle = data[COLUMN_SOURCE]
try:
source = self.db.get_source_from_handle(source_handle)
return unicode(source.get_title())
except:
return u''
def citation_src_id(self, data):
source_handle = data[COLUMN_SOURCE]
try:
source = self.db.get_source_from_handle(source_handle)
return unicode(source.gramps_id)
except:
return u''
def citation_src_auth(self, data):
source_handle = data[COLUMN_SOURCE]
try:
source = self.db.get_source_from_handle(source_handle)
return unicode(source.get_author())
except:
return u''
def citation_src_abbr(self, data):
source_handle = data[COLUMN_SOURCE]
try:
source = self.db.get_source_from_handle(source_handle)
return unicode(source.get_abbreviation())
except:
return u''
def citation_src_pinfo(self, data):
source_handle = data[COLUMN_SOURCE]
try:
source = self.db.get_source_from_handle(source_handle)
return unicode(source.get_publication_info())
except:
return u''
def citation_src_chan(self, data):
source_handle = data[COLUMN_SOURCE]
try:
source = self.db.get_source_from_handle(source_handle)
return format_time(source.change)
except:
return u''
def citation_tooltip(self, data):
return u'Citation tooltip'
# Fields access when 'data' is a Source
def source_handle(self, data):
return unicode(data[COLUMN2_HANDLE])
def source_src_title(self, data):
return unicode(data[COLUMN2_TITLE])
def source_src_id(self, data):
return unicode(data[COLUMN2_ID])
def source_src_auth(self, data):
return unicode(data[COLUMN2_AUTHOR])
def source_src_abbr(self, data):
return unicode(data[COLUMN2_ABBREV])
def source_src_pinfo(self, data):
return unicode(data[COLUMN2_PUBINFO])
def source_src_chan(self, data):
return format_time(data[COLUMN2_CHANGE])
def source_sort2_change(self, data):
return "%012x" % data[COLUMN2_CHANGE]
def source_tooltip(self, data):
return u'Source tooltip'
def dummy_sort_key(self, data):
# dummy sort key for columns that don't have data
return None
|
DaniAkash/hackerrank-solutions | Algorithms/Strings/Alternating Characters/Alternating Characters.py | Python | apache-2.0 | 822 | 0.015815 | '''
Copyright 2017-Present Dani Akash
Licensed under the Apache License, Version 2.0 | (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def solution():
s=list(input().strip())
i,count=1,0
while i<len(s):
if s[i]==s[i-1]:
count+=1
i+=1
return count
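# Example: "AAABBB" has four adjacent equal pairs, so solution() returns 4
# (four deletions leave the alternating string "AB").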
testCases = int(input().strip())
while(testCases):
x=solution()
print(x)
testCases-=1 |
hebaishi/TC2014hackathon | flaskapp/app/routes.py | Python | gpl-2.0 | 1,217 | 0.027116 | # -*- coding: utf-8 -*-
import random
from flask import Flask, render_template,request, flash
from forms import Params
from crawler_amazon import amazoncrawl
from crawler_asos import asoscrawl
app = Flask(__name__)
app.secret_key = 'development key'
@app.route('/',methods = ['GET', 'POST'])
def params():
form = Params()
if request.method == 'POST':
if (form.weight.data is None or form.height.data is None or form.gender.data is None or form.keywords.data is None):
flash('All fields are required.')
print form.height.data
print form.weight.data
print form.gender.data
print form.keywords.data
print "Error"
| return render_template('params.html', form=form)
else:
par = dict()
| par['height'] = form.height.data
par['weight'] = form.weight.data
par['gender'] = form.gender.data
par['keywords'] = form.keywords.data
outlist = amazoncrawl(par)
outlist = outlist + asoscrawl(par)
random.shuffle(outlist)
return render_template('output.html', outlist=outlist)
elif request.method == 'GET':
return render_template('params.html', form=form)
if __name__ == '__main__':
app.run(debug=True)
|
totalvoice/totalvoice-python | totalvoice/cliente/api/totalvoice.py | Python | mit | 1,052 | 0.013308 | from __future__ import absolute_import
import requests
from .helper import utils
class Totalvoice(object):
def __init__(self, cliente):
self.cliente | = cliente
def enviar(self, *args):
raise NotImplemented | Error
def get_by_id(self, id):
raise NotImplementedError
def editar(self, *args):
raise NotImplementedError
def get_relatorio(self, *args):
raise NotImplementedError
def deletar(self, id):
raise NotImplementedError
def get_request(self, host, params = None):
if params != None:
response = requests.get(host, headers=utils.build_header(self.cliente.access_token), params=params)
else:
response = requests.get(host, headers=utils.build_header(self.cliente.access_token))
return response.content
def build_host(self, host, route, values=None):
host += route
if values is not None:
for val in values:
host += "/" + val
return host
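# Illustrative call (the URL below is a placeholder, not necessarily a real
# Totalvoice endpoint):
#
#     self.build_host('https://api.example.com', '/sms', ['123'])
#     # -> 'https://api.example.com/sms/123'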
|
scopely-devops/skew | skew/resources/aws/lambda.py | Python | apache-2.0 | 1,965 | 0 | # Copyright (c) 2014 Scopely, Inc.
# Copyright (c) 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from skew.resources.aws import AWSResource
LOG = log | ging.getLogger(__name__)
class Function(AWSResource):
@classmethod
def enumerate(cls, arn, region, account, resource_id=None, **kwargs):
resources = super(Function, cls).enumerate(arn, region, account,
| resource_id, **kwargs)
for r in resources:
r.data['EventSources'] = []
kwargs = {'FunctionName': r.data['FunctionName']}
response = r._client.call('list_event_source_mappings', **kwargs)
for esm in response['EventSourceMappings']:
r.data['EventSources'].append(esm['EventSourceArn'])
return resources
class Meta(object):
service = 'lambda'
type = 'function'
enum_spec = ('list_functions', 'Functions', None)
detail_spec = None
id = 'FunctionName'
filter_name = None
name = 'FunctionName'
date = 'LastModified'
dimension = 'FunctionName'
tags_spec = ('list_tags', 'Tags',
'Resource', 'arn')
@classmethod
def filter(cls, arn, resource_id, data):
function_name = data.get(cls.Meta.id)
LOG.debug('%s == %s', resource_id, function_name)
return resource_id == function_name
@property
def arn(self):
return self.data.get('FunctionArn')
|
drpngx/tensorflow | tensorflow/contrib/periodic_resample/__init__.py | Python | apache-2.0 | 1,176 | 0.00085 | # =============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LIC | ENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the s | pecific language governing permissions and
# limitations under the License.
# =============================================================================
"""Custom op used by periodic_resample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.periodic_resample.python.ops.periodic_resample_op import periodic_resample
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["periodic_resample"]
remove_undocumented(__name__, _allowed_symbols)
|
Azure/azure-sdk-for-python | sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/check_group_membership_result_py3.py | Python | mit | 1,366 | 0.002196 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckGroupMembershipResult(Model):
"""Server response for IsMemberOf API call.
:param additional_properties: Unmatched properties from the message are
deseriali | zed to this collection
:type additional_properties: dict[str, object]
:param value: True | if the specified user, group, contact, or service
principal has either direct or transitive membership in the specified
group; otherwise, false.
:type value: bool
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'value': {'key': 'value', 'type': 'bool'},
}
def __init__(self, *, additional_properties=None, value: bool=None, **kwargs) -> None:
super(CheckGroupMembershipResult, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.value = value
|
gobstones/PyGobstones-Lang | pygobstoneslang/lang/ast.py | Python | gpl-3.0 | 24,838 | 0.002657 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011, 2012 Pablo Barenbaum <foones@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Abstract syntax tree representation.
Includes the implementation of the semantic actions in the parser.
"""
from itertools import groupby
import pygobstoneslang.common.i18n as i18n
import pygobstoneslang.common.position as position
import pygobstoneslang.common.utils as utils
import bnf_parser
import gbs_builtins
import copy
import json
def fix_string_literal(node):
if node.children[1].type == 'string':
node.children[1].value = node.children[1].value.strip('"')
return node
def _tree_insert_left(operator_type, opr1, arg1, right):
"""Convert trees of the form:
arg1 opr1 (arg2 opr2 arg3)
|--- right ----|
into trees of the form:
(arg1 opr1 arg2) opr2 arg3
|---- left ----|
whenever "right" is of the form (arg2 opr2 arg3)
with the operator type of right is `operator_type`.
"""
if isinstance(right, ASTNode) and right.children[0] == operator_type:
# Swap to left
opr2 = right.children[1]
arg2 = right.children[2]
arg3 = right.children[3]
left = _tree_insert_left(operator_type, opr1, arg1, arg2)
return ASTNode(
[operator_type, opr2, left, arg3],
arg1.pos_begin,
right.pos_end)
else:
# Do not swap
return ASTNode(
[operator_type, opr1, arg1, right],
arg1.pos_begin,
right.pos_end)
def _is_range_expr(node):
return len(node.children) in [3,4] and node.children[-2] == 'range_to'
def _make_range_generator(expr_range):
pos_b = expr_range.pos_begin
pos_e = expr_range.pos_end
if len(expr_range.children) == 4:
first, second, _, last = expr_range.children
elif len(expr_range.children) == 3:
first, _, last = expr_range.children
second = ASTNode(['literal',
bnf_parser.Token('symbol', 'NoSecondElementForRange', pos_b, pos_e)],
pos_b, pos_e)
else:
assert False
return ASTNode(['funcCall',
bnf_parser.Token('lowerid', '_range', pos_b, pos_e),
ASTNode([first, last, second], pos_b, pos_e)
], pos_b, pos_e)
def _make_list_expression(expr_list):
"""Convert a list of expressions [e1, ..., eN] into an
expression representing a Gobstones list, of the form:
cons(e1, ... cons(eN, nil) ... )
"""
pos_b = expr_list.pos_begin
pos_e = expr_list.pos_end
lst = expr_list.children
ret = ASTNode(['funcCall',
bnf_parser.Token('lowerid', '[]', pos_b, pos_e),
ASTNode([], pos_b, pos_e)
], pos_b, pos_e)
for exp in utils.seq_reversed(lst):
pos_b = exp.pos_begin
pos_e = exp.pos_end
exp_list = ASTNode(['funcCall',
bnf_parser.Token('lowerid', '[x]', pos_b, pos_e),
ASTNode([exp], pos_b, pos_e)
], pos_b, pos_e)
ret = ASTNode(['listop',
bnf_parser.Token('lowerid', '++', pos_b, pos_e),
exp_list,
ret], pos_b, pos_e | )
return ret
def _concat_internal_list_generators(expr_list):
return expr_list
def _infixl(subtr | ees, action):
"Expands an action corresponding to an INFIXL declaration."
# arg1 opr1 (arg2 opr2 arg3) ---> (arg1 opr1 arg2) opr2 arg3
# |---- right ---| |---- left ----|
operator_type = action[1]
arg1 = subtrees[1]
opr1_args = subtrees[2]
if opr1_args is None:
return arg1
opr1 = opr1_args.children[1]
right = opr1_args.children[2]
return _tree_insert_left(operator_type, opr1, arg1, right)
def _infixr(subtrees, action):
"Expands an action corresponding to an INFIXR declaration."
# arg1 opr arg2
operator_type = action[1]
arg1 = subtrees[1]
opr_arg2 = subtrees[2]
if opr_arg2 is None:
return arg1
opr = opr_arg2.children[1]
arg2 = opr_arg2.children[2]
return ASTNode([operator_type, opr, arg1, arg2],
arg1.pos_begin,
arg2.pos_end)
class ASTNode(position.ProgramElement):
"Represents an internal node of an abstract syntax tree."
def __init__(self, children, pos_begin, pos_end):
position.ProgramElement.__init__(self)
# A node is represented by a list
# - its first element is a label
# - the remaining elements are the actual children
self.children = children
self.pos_begin = pos_begin
self.pos_end = pos_end
# Used by the static analysis tool
self.live_in = None
self.live_out = None
self.live_gen = None
self.annotations = {}
def __repr__(self):
return 'AST(' + repr(self.children) + ')'
def clone(self):
""" Return a shallow copy of this ASTNode """
return copy.copy(self)
def first_child(self):
""" Return the first child, skipping None children that
may arise from optional elements in the grammar.
"""
for child in self.children[1:]:
if child is None:
continue
return child
return None
def description(self):
""" Return a string describing the AST node, with text
suitable for referring to the node in error reporting or other
human interface.
"""
if self.children == []:
return ''
head = self.children[0]
if isinstance(head, str):
if head in i18n.AST_type_descriptions:
return i18n.AST_type_descriptions[head]
else:
return head
else:
return head.description()
def has_children(self):
"Return True (every internal AST node has children)."
return True
def show_liveness_info(self):
"""Return a string, showing liveness annotations made at
this node by the static analysis tool.
"""
live_in = ''
if self.live_in is not None:
live_in = ''.join([
'{live_in: ',
str(' '.join(utils.seq_sorted(self.live_in.keys()))),
'}'
])
live_out = ''
if self.live_out is not None:
live_out = ''.join([
'{live_out: ',
str(' '.join(utils.seq_sorted(self.live_out.keys()))),
'}'
])
return live_in, live_out
def json(self, sort_keys=True, indent=2):
""" Returns a JSON representation of the ASTNode """
return json.dumps(self.dict(), sort_keys=sort_keys, indent=indent)
def dict(self):
""" Returns a dict representation of the ASTNode """
return { "children" : [self.child_to_dict(child) for child in self.children],
"position_begin" : str(self.pos_begin),
"position_end" : str(self.pos_end) }
def child_to_dict(self, child):
if isinstance(child, ASTNode):
return child.dict()
else:
return str(child)
def show_ast(self, indent=0, show_liveness=False):
"""Return a string, result of pretty printing the full
AST starting at this node, with the given indentation.
If show_liveness is True, also show the annotations made
at nodes by the static analysis tool.
"""
def pretty_print(elem, i=indent + 1):
"Pretty print an AST |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/decoding/transformer.py | Python | bsd-3-clause | 19,949 | 0.000201 | # Authors: Mainak Jas <mainak@neuro.hut.fi>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <trachelr@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from .mixin import TransformerMixin
from .. import pick_types
from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
band_stop_filter)
from ..time_frequency import multitaper_psd
from ..externals import six
from ..utils import _check_type_picks, deprecated
class Scaler(TransformerMixin):
"""Standardizes data across channels
Parameters
----------
info : instance of Info
The measurement info
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
info : instance of Info
The measurement info
ch_mean_ : dict
The mean value for each channel type
std_ : dict
The standard deviation for each channel type
"""
def __init__(self, info, with_mean=True, with_std=True):
self.info = info
self.with_mean = with_mean
self.with_std = with_std
self.ch_mean_ = dict() # TODO rename attribute
self.std_ = dict() # TODO rename attribute
def fit(self, epochs_data, y):
"""Standardizes data across channels
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data to concatenate channels.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
self : instance of Scaler
Returns the modified instance.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
X = np.atleast_3d(epochs_data)
picks_list = dict()
picks_list['mag'] = pick_types(self.info, meg='mag', ref_meg=False,
exclude='bads')
picks_list['grad'] = pick_types(self.info, meg='grad', ref_meg=False,
exclude='bads')
picks_list['eeg'] = pick_types(self.info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
self. | picks_list_ = picks_list
for key, this_pick in picks_list.items():
if self.with_mean:
ch_mean = X[:, thi | s_pick, :].mean(axis=1)[:, None, :]
self.ch_mean_[key] = ch_mean # TODO rename attribute
if self.with_std:
ch_std = X[:, this_pick, :].std(axis=1)[:, None, :]
self.std_[key] = ch_std # TODO rename attribute
return self
def transform(self, epochs_data, y=None):
"""Standardizes data across channels
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data concatenated over channels.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
X = np.atleast_3d(epochs_data)
for key, this_pick in six.iteritems(self.picks_list_):
if self.with_mean:
X[:, this_pick, :] -= self.ch_mean_[key]
if self.with_std:
X[:, this_pick, :] /= self.std_[key]
return X
def inverse_transform(self, epochs_data, y=None):
""" Inverse standardization of data across channels
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data concatenated over channels.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
X = np.atleast_3d(epochs_data)
for key, this_pick in six.iteritems(self.picks_list_):
if self.with_mean:
X[:, this_pick, :] += self.ch_mean_[key]
if self.with_std:
X[:, this_pick, :] *= self.std_[key]
return X
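# Hedged usage sketch (epochs_data, y and the measurement info are assumed to
# be supplied by the caller):
#
#     scaler = Scaler(info)
#     X = scaler.fit(epochs_data, y).transform(epochs_data)
#     restored = scaler.inverse_transform(X)  # undoes the centering/scaling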
class EpochsVectorizer(TransformerMixin):
"""EpochsVectorizer transforms epoch data to fit into a scikit-learn pipeline.
Parameters
----------
info : instance of Info
The measurement info.
Attributes
----------
n_channels : int
The number of channels.
n_times : int
The number of time points.
"""
def __init__(self, info=None):
self.info = info
self.n_channels = None
self.n_times = None
def fit(self, epochs_data, y):
"""For each epoch, concatenate data from different channels into a single
feature vector.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data to concatenate channels.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
self : instance of ConcatenateChannels
returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
return self
def transform(self, epochs_data, y=None):
"""For each epoch, concatenate data from different channels into a single
feature vector.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels * n_times)
The data concatenated over channels
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
n_epochs, n_channels, n_times = epochs_data.shape
X = epochs_data.reshape(n_epochs, n_channels * n_times)
# save attributes for inverse_transform
self.n_epochs = n_epochs
self.n_channels = n_channels
self.n_times = n_times
return X
def inverse_transform(self, X, y=None):
"""For each epoch, reshape a feature vector into the original data shape
Parameters
----------
X : array, shape (n_epochs, n_channels * n_times)
The feature vector concatenated over channels
y : None | array, shape (n_epochs,)
The label for each epoch.
If None not used. Defaults to None.
Returns
-------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The original data
"""
if not isinstance(X, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(X))
return X.reshape(-1, self.n_channels, self.n_times)
@deprecated("Class 'ConcatenateChannels' has been renamed to "
"'EpochsVectorizer' and will be removed in release 0.11.")
class ConcatenateChannels(EpochsVectorizer):
pass
class PSDEstimator(TransformerMixin):
"""Comp |
Safuya/python_3_essential_training | 07 Loops/loop_control.py | Python | gpl-3.0 | 223 | 0.004484 | def main():
s = 'this is a string'
for c in s:
print(c, end='')
else:
print(' else') |
print()
for i in range(10, 0, -1):
print(i, end=' ')
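# Note: a for-loop's `else` clause runs only when the loop completes without
# hitting `break`; the first loop above never breaks, so ' else' is printed.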
if __name__ == "__main__":
main( | )
|
3dfxsoftware/cbss-addons | account_partner_ledger_report/__openerp__.py | Python | gpl-2.0 | 1,670 | 0.001796 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2 | 009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your opti | on) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner Ledger Report',
'version': '1.0',
'author': 'CLEARCORP S.A.',
'category': 'Accounting & Finance',
'description': """
Partner Ledger Report.
==============================
This module modifies the partner ledger report """,
'website': 'http://clearcorp.co.cr',
'depends': ['account','account_report_lib',],
'data': [
'security/ir.model.access.csv',
'wizard/account_partner_ledger_report_wizard_view.xml',
'report/report.xml',
'report_menus.xml',
],
'active': False,
'installable': True,
'license': 'AGPL-3',
}
|
washbz250/LearnPythonTheHardWay | Python3/Projects/hi_lo.py | Python | unlicense | 1,616 | 0.001238 | import random
is_alive = True
while is_alive is True:
guess_right = False
num = random.randrange(1, 100)
guess = 0
guess_num = 0
print("Hi-Lo guessing game")
print("I came up with a number between 1 and 100")
print("You have to guess the number. If your guess is higher, I will tell you so. ")
print("If your guess is lower, I will tell you so.\n")
while guess_right is False:
try:
guess = int(input("Type your guess here: "))
if guess > num:
print("Your guess is higher than the number.\n")
guess_right = False
guess_num += 1
elif guess < num:
print("Your guess is lower than the number.\n")
guess_right = False
guess_num += 1
elif guess == num:
print("You win! You guessed the right number.\n")
print("Thanks for playing!")
guess_num += 1
print("It took you", guess_num, "guesses to get the number.\n")
guess_right = True
else:
print("Please type a real number.")
except ValueError:
print("Type in a whole number, no decimals or characters.")
print("\nWould you like to play again?")
try:
restart = int(input("Yes (Press 1) or No (Press 2)? "))
| if restart == 1:
print("Playing again...")
elif restart == 2:
| print("Quitting. Thanks for playing")
is_alive = False
except ValueError:
print("Press 1 to restart, 2 to quit.") |
21strun/django-coverage | assets/badges/drone.io/badge_maker.py | Python | apache-2.0 | 1,424 | 0.00632 | """
Drone.io badge generator.
Currently set up to work on Mac.
Requires Pillow.
"""
import os
fro | m PIL import Image, ImageDraw, ImageFont
SIZE = (95, 18)
def hex_colour(hex):
if hex[0] == '#':
hex = hex[1:]
return (
int(hex[:2], 16),
int(hex[2:4], 16),
int(hex[4:6], 16),
)
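# Illustrative check (added): each two-character pair parses in base 16,
# e.g. int('4A', 16) == 74, so '#4A4A4A' decodes to a mid-grey triple.
assert hex_colour('#4A4A4A') == (74, 74, 74)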
BACKGROUND = hex_colour('#4A4A4A')
SUCCESS = hex_colour('#94B944')
WARNING = hex_colour('#E4A83C')
ERROR = hex_colour('#B10610')
SUCCESS_CUTOFF = 85
WARNING_CUTOFF = 45
FONT = Image | Font.truetype(size=10, filename="/Library/Fonts/Arial.ttf")
FONT_SHADOW = hex_colour('#525252')
PADDING_TOP = 3
def build_image(percentage, colour):
image = Image.new('RGB', SIZE, color=BACKGROUND)
drawing = ImageDraw.Draw(image)
drawing.rectangle([(55, 0), SIZE], colour, colour)
drawing.text((8, PADDING_TOP+1), 'coverage', font=FONT, fill=FONT_SHADOW)
drawing.text((7, PADDING_TOP), 'coverage', font=FONT)
drawing.text((63, PADDING_TOP+1), '%s%%' % percentage, font=FONT, fill=FONT_SHADOW)
drawing.text((62, PADDING_TOP), '%s%%' % percentage, font=FONT)
return image
os.chdir('_build')
for i in range(101):
filename = '%i.png' % i
file = open(filename, 'wb')
if i < WARNING_CUTOFF:
build_image(i, ERROR).save(file)
elif i < SUCCESS_CUTOFF:
build_image(i, WARNING).save(file)
else:
build_image(i, SUCCESS).save(file)
|
baigk/compass-core | compass/deployment/utils/__init__.py | Python | apache-2.0 | 638 | 0 | # Copyright 2 | 014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for t | he specific language governing permissions and
# limitations under the License.
__author__ = "Grace Yu (grace.yu@huawei.com)"
|
nimadini/Teammate | handlers/home/upload_url.py | Python | apache-2.0 | 600 | 0.003333 | __author__ = 'stanley'
from domain.user import *
from google.appengine.api import users
from google.appengine.ext import blobstore
import webapp2
import json
class Upl | oadURLHandler(webapp2.RequestHandler):
def post(self):
usr = user_key(users.get_current_user().email()).get()
if usr is None:
return # TODO
cover_upload_url = blobstore.create_upload_url('/upload')
self.response.headers['Content-Type'] = 'application/json'
result = json.dumps({'successful': True, 'cover_upload_url': co | ver_upload_url})
self.response.write(result) |
PagueVeloz/ClientAPI | python/pagueveloz/api/v3.py | Python | mit | 295 | 0.006849 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import PagueVelozCl | ient
VERSAO = 'v3'
class Boleto(PagueVelozClient):
'''Service for issuing and querying Boletos Bancários (bank payment slips).'''
def __init__(self, chave=None):
PagueVelozClient.__init__(self, VERSAO, 'Boleto', chave | ) |
jsha/letsencrypt | certbot/plugins/common.py | Python | apache-2.0 | 16,182 | 0.001051 | """Plugin common functions."""
import logging
import os
import re
import shutil
import tempfile
import OpenSSL
import pkg_resources
import zope.interface
from acme.jose import util as jose_util
from certbot import constants
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot import reverter
from certbot import util
logger = logging.getLogger(__name__)
def option_namespace(name):
"""ArgumentParser options namespace (prefix of all options)."""
return name + "-"
def dest_namespace(name):
"""ArgumentParser dest namespace (prefix of all destinations)."""
return name.replace("-", "_") + "_"
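# Illustrative check (added, not part of the original module): these helpers
# mirror how argparse derives "dest" names from option strings.
assert option_namespace("apache-plus") == "apache-plus-"
assert dest_namespace("apache-plus") == "apache_plus_"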
private_ips_regex = re.compile(
r"(^127\.0\.0\.1)|(^10\.)|(^172\.1[6-9]\.)|"
r"(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^192\.168\.)")
hostname_regex = re.compile(
r"^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*[a-z]+$", re.IGNORECASE)
@zope.interface.implementer(interfaces.IPlugin)
class Plugin(object):
"""Generic plugin."""
# provider is not inherited, subclasses must define it on their own
# @zope.interface.provider(interfaces.IPluginFactory)
def __init__(self, config, name):
self.config = config
self.name = name
@jose_util.abstractclassmethod
def add_parser_arguments(cls, add):
"""Add plugin arguments to the CLI argument parser.
NOTE: If some of your flags interact with others, you can
use cli.report_config_interaction to register this to ensure
values are correctly saved/overridable during renewal.
:param callable add: Function that proxies calls to
`argparse.ArgumentParser.add_argument` prepending options
with unique plugin name prefix.
"""
@classmethod
def inject_parser_options(cls, parser, name):
"""Inject parser options.
See `~.IPlugin.inject_parser_options` for docs.
"""
# dummy function, doesn't check if dest.startswith(self.dest_namespace)
def add(arg_name_no_prefix, *args, **kwargs):
# pylint: disable=missing-docstring
return parser.add_argument(
"--{0}{1}".format(option_namespace(name), arg_name_no_prefix),
*args, **kwargs)
return cls.add_parser_arguments(add)
@property
def option_namespace(self):
"""ArgumentParser options namespace (prefix of all options)."""
return option_namespace(self.name)
def option_name(self, name):
"""Option name (include plugin namespace)."""
return self.option_namespace + name
@property
def dest_namespace(self):
"""ArgumentParser dest namespace (prefix of all destinations)."""
return dest_namespace(self.name)
def dest(self, var):
"""Find a destination for given variable ``var``."""
# this should do exactly the same what ArgumentParser(arg),
# does to "arg" to compute "dest"
return self.dest_namespace + var.replace("-", "_")
def conf(self, var):
"""Find a configuration value for variable ``var``."""
return getattr(self.config, self.dest(var))
# other
class Installer(Plugin):
"""An installer base class with reverter and ssl_dhparam methods defined.
Installer plugins do not have to inherit from this class.
"""
def __init__(self, *args, **kwargs):
super(Installer, self).__init__(*args, **kwargs)
self.reverter = reverter.Reverter(self.config)
def add_to_checkpoint(self, save_files, save_notes, temporary=False):
"""Add files to a checkpoint.
:param set save_files: set of filepaths to save
:param str save_notes: notes about changes during the save
:param bool temporary: True if the files should be added to a
temporary checkpoint rather than a permanent one. This is
usually used for changes that will soon be reverted.
:raises .errors.PluginError: when unable to add to checkpoint
"""
if temporary:
checkpoint_func = self.reverter.add_to_temp_checkpoint
else:
checkpoint_func = self.reverter.add_to_checkpoint
try:
checkpoint_func(save_files, save_notes)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def finalize_checkpoint(self, title):
"""Timestamp and save changes made through the reverter.
:param str title: Title describing checkpoint
:raises .errors.PluginError: when an error occurs
"""
try:
self.reverter.finalize_checkpoint(title)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def recovery_routine(self):
"""Revert all previously modified files.
Reverts all modified files that have not been saved as a checkpoint
:raises .errors.PluginError: If unable to recover the configuration
"""
try:
self.reverter.recovery_routine()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def revert_temporary_config(self):
"""Rollback temporary checkpoint.
:raises .errors.PluginError: when unable to revert config
"""
try:
self.reverter.revert_temporary_config()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def rollback_checkpoints(self, rollback=1):
"""Rollback saved checkpoints.
:param int rollback: Number of checkpoints to revert
:raises .errors.PluginError: If there is a problem with the input or
the function is unable to correctly revert the configuration
"""
try:
self.reverter.rollback_checkpoints(rollback)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def view_config_changes(self):
"""Show all of the configuration changes that have taken place.
:raises .errors.PluginError: If there is a problem while processing
the checkpoints directories.
"""
try:
self.revert | er.view_config_changes()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
@property
def ssl_dhparams(self):
"""Full absolute path to ssl_dhparams file."""
return os.path.join(self.config.config_dir, constants.SSL_DHPARAMS_DEST)
@property
def updated_ssl_dhparams_digest(self):
"""Full absolute path to digest of updated ssl_dhparam | s file."""
return os.path.join(self.config.config_dir, constants.UPDATED_SSL_DHPARAMS_DIGEST)
def install_ssl_dhparams(self):
"""Copy Certbot's ssl_dhparams file into the system's config dir if required."""
return install_version_controlled_file(
self.ssl_dhparams,
self.updated_ssl_dhparams_digest,
constants.SSL_DHPARAMS_SRC,
constants.ALL_SSL_DHPARAMS_HASHES)
class Addr(object):
r"""Represents a virtual host address.
:param str addr: addr part of vhost address
:param str port: port number or \*, or ""
"""
def __init__(self, tup, ipv6=False):
self.tup = tup
self.ipv6 = ipv6
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
if str_addr.startswith('['):
# ipv6 addresses starts with [
endIndex = str_addr.rfind(']')
host = str_addr[:endIndex + 1]
port = ''
if len(str_addr) > endIndex + 2 and str_addr[endIndex + 1] == ':':
port = str_addr[endIndex + 2:]
return cls((host, port), ipv6=True)
else:
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
def __str__(self):
if self.tup[1]:
return "%s:%s" % self.tup
return self.tup[0]
def normalized_tuple(self):
"""Normalized representation of addr/port tuple
"""
if self.ipv6:
return (self.get_ipv6_exploded(), self.tup[1])
return se |
HiSPARC/publicdb | publicdb/inforecords/migrations/0011_remove_pulseheightthresholds.py | Python | gpl-3.0 | 507 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-07-22 10:50
from __future__ import unicode_literals
from django.db import migrations |
class Migration(migrations.Migration):
dependencies = [
('inforecords', '0010_related_names'),
]
operations = [
migrations.RemoveField(
| model_name='monitorpulseheightthresholds',
name='station',
),
migrations.DeleteModel(
name='MonitorPulseheightThresholds',
),
]
|
beni55/sentry | src/sentry/migrations/0068_auto__add_field_projectkey_user_added__add_field_projectkey_date_added.py | Python | bsd-3-clause | 20,383 | 0.008242 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ProjectKey.user_added'
db.add_column('sentry_projectkey', 'user_added',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='keys_added_set', null=True, to=orm['auth.User']))
# Adding field 'ProjectKey.date_added'
db.add_column('sentry_projectkey', 'date_added',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True))
def backwards(self, orm):
# Deleting field 'ProjectKey.user_added'
db.delete_column('sentry_projectkey', 'user_added_id')
# Deleting field 'ProjectKey.date_added'
db.delete_column('sentry_projectkey', 'date_added')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
' | sentry.filterkey': {
'Meta': {'uniq | ue_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ( |
Qirky/PyKinectTk | PyKinectTk/utils/3D.py | Python | gpl-3.0 | 2,821 | 0.008862 | """
3D.py is the interface for plotting Skeleton Wireframes in a 3D
perspective using matplotlib
"""
# 3D Plotting tool
from matplotlib import pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
# PyKinect XEF modules
import Colour
#: 3D View Obj
class View:
def __init__(self, bodies, **kwargs):
""" data is from Load.BodyData """
# Performance data
self.bodies = bodies
# Setup Plot
self.fig = plt.figure()
self.axis = self.fig.add_subplot(111, projection='3d')
self.axis.axis('on')
# Define the lines being used
kwargs['marker'] = '.'
kwargs['ms'] = 5
self.lines = []
for i, body in enumerate(self.bodies):
body_lines = []
for joint in body:
for child in joint.children():
body_lines += self.axis.plot([],[],[], c = Colour.plt[i], **kwargs)
self.lines.append(body_lines)
# Settings
self.axis.set_ylim(-2,2)
self.axis.set_xlim(0,4)
self.axis.set_zlim(-2,2)
self.rate = 1.0 / 3.0
def update(self, frame):
""" Used to animate the drawing - passes a blitting=True argument to draw() """
self.draw(frame, blitting=True)
return self.lines
def draw(self, frame, blitting=False):
""" Draw the bodies at this frame """
for body in range(len(self.bodies)):
| try:
self.draw_body(body, frame)
except:
pass
if blitting:
self.fig.canvas.blit()
else:
self.display()
| return
@staticmethod
def display():
plt.show()
def draw_body(self, body, frame):
""" Draws one body at frame """
bone = 0
for joint in self.bodies[body]:
for start, end in joint.bones_3D(frame):
self.draw_bone(body, start, end, bone)
bone += 1
def draw_bone(self, body, a, b, i):
""" Updates line i to draw a segment between points a and b """
# Re-order axes
# Kinect Axis Z is depth (matplot X)
# Kinect Axis Y is height (matplot Z)
# Kinect Axis X is width (matplot y)
y, z, x = [(a[n],b[n]) for n in range(3)]
self.lines[body][i].set_data(x, y)
self.lines[body][i].set_3d_properties(z)
def animate(view):
""" Takes a 3D.View object and 'plays' the frames """
try:
mov = animation.FuncAnimation(view.fig, view.update, interval=view.rate, blit=False)
view.display()
except:
pass
if __name__ == "__main__":
# debug
import Load
a = View(Load.BodyData(5))
animate(a)
|
eske/seq2seq | scripts/post_editing/select-by-ter.py | Python | apache-2.0 | 1,303 | 0.00307 | #!/usr/bin/env python3
import argparse
import numpy as np
import random
import sys
parser = argparse.ArgumentParser()
parser.add_argument('ref_vectors')
parser.add_argument('vectors')
parser.add_argument('-n', type=int, default=500000)
parser.add_argument('-k', type=int, default=1)
parser.add_argument('-m', type=int, default=1000)
if __name__ == '__main__':
args = parser.parse_args()
with open(args.ref_vectors) as f:
ref_vectors = [np.array([float(x) for x in line.split(',')]) for line in f]
with open(args.vectors) as f:
vectors = [np.array([float(x) for x in line.split(',')]) for line in f]
vectors = list(enumerate(vectors))
n = 0
l = len(vectors)
while n < args.n and l > 0:
vector = ref_vectors[n % len(ref_vectors)]
n += 1
def key(i):
return np.sum((vector - vectors[i][1]) ** 2)
indices = random.sample(range | (l), k=args.m)
if args.k > 1:
indices = sorted(indices, key=key)[:args.k]
else:
indices = [min(indices, key=key)]
for i in indices:
sys.stdout.write(str(vectors[i][0]) + '\n')
#sys.stdout.flush()
# sample without replacement: swap each selected vector into the inactive
# tail of the list and shrink the active length so it cannot be drawn again
for i in indices:
vectors[i], vecto | rs[l - 1] = vectors[l - 1], vectors[i]
l -= 1
|
mthipparthi/parliament-search | parliamentsearch/pipelines.py | Python | mit | 1,789 | 0.001118 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.conf import settings
import pymongo
from datetime import datetime
from .models import PQDataModel
class ParliamentSearchPipeline(object):
def __init__(self):
self.connection = None
def process_item(self, items, spider):
if spider.name == "ls_questions":
questions = items['questions']
# self.insert_in_db(questions)
else:
raise ValueError("Invalid collection:", spider.name)
return items
def insert_in_db(self, questions):
with PQDataModel.batch_write() as batch:
records = []
for q in questions:
record = PQDataModel()
record.question_number = q['question_number']
record.question_origin = q['question_origin']
record.question_type = q['question_type']
record.question_session = q['question_session']
record.question_ministry = q['question_ministry']
record.question_member = q['question_member']
record.question_subject = q['question_subject']
record.question_annex = q['question_annex']
record.question_url = q['question_url']
record.question_text = q['question_text']
| record.question_date = datetime.strptime(q['question_date'], '%d.%m.%Y')
records.append(record)
for record in records:
batch.save(record)
| |
henryneu/Python | sample/mydict2.py | Python | apache-2.0 | 836 | 0.024155 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# documentation test (doctest)
class Dict(dict):
'''
Simple dict but also support access as x.y style
>>> d1 = Dict()
>>> d1['x'] = 100
>>> d1.x
100
>>> d1.y = 200
>> | > d1['y']
| 200
>>> d2 = Dict(a=1, b=2, c='3')
>>> d2.c
'3'
>>> d2['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> d2.empty
Traceback (most recent call last):
...
AttributeError: 'Dict' object has no attribute 'empty'
'''
def __init__(self, **kw):
super().__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
if __name__ == '__main__':
import doctest
doctest.testmod() |
edeposit/edeposit.amqp.ltp | src/edeposit/amqp/ltp/amqp_structures/responses.py | Python | mit | 725 | 0 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from collections import namedtuple
# Functions & classes ================================== | =======================
class TrackingResponse(namedtuple("TrackingResponse", ["book_id",
"exported",
"error"])):
"""
Result of the :class:`.TrackingRequest`.
Attributes:
book_id (str): ID of the book.
exported (bool): True if the book was successfull | y exported.
error (str): Type of error if the export failed.
"""
|
openstack/designate | designate/tests/unit/test_context.py | Python | apache-2.0 | 3,468 | 0 | # Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import testtools
from designate import context
from designate import exceptions
from designate import policy
import designate.tests
class TestDesignateContext(designate.tests.TestCase):
def test_deepcopy(self):
orig = context.DesignateContext(
user_id='12345', project_id='54321'
)
copy = orig.deepcopy()
self.assertEqual(orig.to_dict(), copy.to_dict())
def test_tsigkey_id_override(self):
orig = context.DesignateContext(
tsigkey_id='12345', project_id='54321'
)
copy = orig.to_dict()
self.assertEqual('TSIG:12345 54321 - - -', copy['user_identity'])
def test_elevated(self):
ctxt = context.DesignateContext(
user_id='12345', project_id='54321'
)
admin_ctxt = ctxt.elevated()
self.assertFalse(ctxt.is_admin)
self.assertTrue(admin_ctxt.is_admin)
self.assertEqual(0, len(ctxt.roles))
def test_elevated_with_show_deleted(self):
ctxt = context.DesignateContext(
user_id='12345', project_id='54321'
)
admin_ctxt = ctxt.elevated(show_deleted=True)
self.assertTrue(admin_ctxt.show_deleted)
def test_all_tenants(self):
ctxt = context.DesignateContext(
user_id='12345', project_id='54321'
)
admin_ctxt = ctxt.elevated()
admin_ctxt.all_tenants = True
self.assertFalse(ctxt.is_admin)
self.assertTrue(admin_ctxt.is_admin)
self.assertTrue(admin_ctxt.all_tenants)
def test_all_tenants_policy_failure(self):
ctxt = context.DesignateContext(
user_id='12345', project_id='54321'
)
with testtools.ExpectedException(exceptions.Forbidden):
ctxt.all_tenants = True
def test_edit_managed_records(self):
ctxt = context.DesignateContext(
user_id='12345', project_id='54321'
)
admin_ctxt = ctxt.elevated()
admin_ctxt.edit_managed_records = True
self.assertFalse(ctxt.is_admin)
self.assertTrue(admin_ctxt.is_admin)
self.assertTrue(admin_ctxt.edit_managed_records)
def test_edit_managed_records_failure(self):
ctxt = context.DesignateContext(
user_id='12345', project_id='54321'
)
with testtools.ExpectedException(exceptions.Forbidden):
ctxt.edit_managed_records = True
@mock.patch.object(policy, 'check')
def test_sudo(self, mock_policy_check):
ctxt = context.DesignateContext(
user_id='12345', project_id='old_project'
| )
ctxt.sudo('new_projec | t')
self.assertTrue(mock_policy_check.called)
self.assertEqual('new_project', ctxt.project_id)
self.assertEqual('old_project', ctxt.original_project_id)
|
Quantika14/pygtkcodebuffer | examples/simple-edit.py | Python | lgpl-3.0 | 2,136 | 0.015449 | #!/usr/bin/python
import gtk
import gtk.glade
import os
import glob
import os.path
import re
# not needed if you have codebuffer installed:
import sys
sys.path.insert(0, "..")
from gtkcodebuffer import CodeBuffer, SyntaxLoader, add_syntax_path
# also not needed if installed:
add_syntax_path("../syntax/")
class Editor:
def __init__(self):
self.__gladefile = os.path.join(os.path.dirname(__file__), "simpleedit.glade")
self.__xml = gtk.glade.XML(self.__gladefile, "mainwindow")
synmenu = self.__xml.get_widget("syntaxmenu")
lst = glob.glob("../syntax/*.xml")
lst.sort()
lst = map(lambda x: re.match("^(.+)\.xml$",os.path.basename(x)).group(1), lst)
for lang in lst:
item = gtk.MenuItem(lang, False)
synmenu.append(item)
item.connect("activate", self.on_lang_changed, lang)
self.__xml.get_widget("fileopen").connect("activate", self.on_open)
self.__xml.get_widget("helpinfo").connect("activate", self.on_show_info)
self.__xml.get_widget("mainwindow").show_all()
self.__xml.get_widget("mainwindow").connect("destroy",self.on_destroy)
self.__buffer = CodeBuffer(None)
self.__xml.get_widget("textview").set_buffe | r(self.__buffer)
def on_lang_changed(self, widget, lang):
lspec = SyntaxLoader(lang)
self.__buffer.reset_language(lspec)
def on_open(self, widget):
dlg = gtk.FileSelection("Open...")
if not dlg.run() == gtk.RESPONSE_OK:
dlg.destroy()
return
fname = dlg.get_filename()
dlg.destroy()
self.__buffer.set_text(open(fname, "r") | .read())
def on_show_info(self, widget):
dlg = gtk.glade.XML(self.__gladefile, "aboutdialog").get_widget("aboutdialog")
dlg.run()
dlg.destroy()
def on_destroy(self, widget):
gtk.main_quit()
if __name__ == '__main__':
edit = Editor()
gtk.main()
|
jimmyDunne/perimysium | perimysium/modeling.py | Python | bsd-3-clause | 34,048 | 0.001909 | # ----------------------------------------------------------------------- #
# The OpenSim API is a toolkit for musculoskeletal modeling and #
# simulation. See http://opensim.stanford.edu and the NOTICE file #
# for more information. OpenSim is developed at Stanford University #
# and supported by the US National Institutes of Health (U54 GM072970, #
# R24 HD065690) and by DARPA through the Warrior Web program. #
# #
# Copyright (c) 2005-2012 Stanford University and the Authors #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. | #
# You may obtain a copy of the License at #
# http://www.apache.org/licenses/LICENSE-2.0. #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT | WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. See the License for the specific language governing #
# permissions and limitations under the License. #
# ----------------------------------------------------------------------- #
#
# Authors: Christopher Dembia, Thomas Uchida
# Stanford University
"""Modifying OpenSim Model's, and any other OpenSim object.
"""
# TODO currently, this module assumes this package is installed via Jython, and
# that the OpenSim Jython wrapping is on the Jython CLASSPATH. When an OpenSim
# Python wrapping is available, we can use CPython instead.
import platform
def running_in_jython():
return platform.system() == 'Java'
if running_in_jython():
import org.opensim.modeling as osm
else:
import opensim as osm
# NB: 'print' is a Python keyword, so the Jython-wrapped obj.print() method
# cannot be called directly; it has to be invoked via exec below.
def printobj(obj, fname):
if running_in_jython():
exec('obj.print(fname)')
else:
obj.printToXML(fname)
pi = 3.14159265359
def replace_thelen_muscles_with_millardequilibrium_muscles(model):
"""Edits the given model:
1. Removes all Thelen muscles.
2. Replaces them with Millard equilibrium muscles.
"""
# 1) Loop through all the forces in the model. Copy them over to a new
# ForceSet, unless it's a Thelen muscle. If it's a Thelen muscle, create a
# similar Millard muscle.
fset = osm.ForceSet()
for i_force in range(model.getForceSet().getSize()):
force = model.getForceSet().get(i_force)
old = osm.Thelen2003Muscle.safeDownCast(force)
# If not a Thelen muscle, just clone it.
if not old:
fset.cloneAndAppend(force)
else:
new = osm.Millard2012EquilibriumMuscle()
new.setName(old.getName())
# GeometryPath.
# --------------
# TODO geometry path wrap, visible object, ...
old_geopath = old.getGeometryPath()
new_geopath = new.updGeometryPath()
for i_pt in range(old_geopath.getPathPointSet().getSize()):
old_pt = old_geopath.getPathPointSet().get(i_pt)
new_geopath.updPathPointSet().cloneAndAppend(old_pt)
# Parameters.
# -----------
def transfer(new, old, name):
exec('new.set_%s(old.get_%s())' % (name, name))
transfer(new, old, 'max_isometric_force')
transfer(new, old, 'optimal_fiber_length')
transfer(new, old, 'tendon_slack_length')
transfer(new, old, 'pennation_angle_at_optimal')
transfer(new, old, 'max_contraction_velocity')
transfer(new, old, 'activation_time_constant')
transfer(new, old, 'deactivation_time_constant')
transfer(new, old, 'default_activation')
transfer(new, old, 'default_fiber_length')
fset.cloneAndAppend(new)
# 2) clearAndDestroy the model's ForceSet().
model.updForceSet().clearAndDestroy()
# 3) Add all forces from the new ForceSet to the model.
for i_force in range(fset.getSize()):
model.updForceSet().cloneAndAppend(fset.get(i_force))
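# Hedged usage sketch (added): the file names below are placeholders, not
# part of the original module.
def _example_replace_muscles():
    model = osm.Model('gait2392.osim')            # hypothetical input model
    replace_thelen_muscles_with_millardequilibrium_muscles(model)
    printobj(model, 'gait2392_millard.osim')      # write the edited model out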
def control_set_from_storage_files(sto_list):
"""
Parameters
----------
sto_list: list of org.opensim.modeling.Storage's
Each column is written to a single ControlSet, as ControlLinear's.
Returns
-------
cset : org.opensim.modeling.ControlSet
"""
# TODO documentation.
cset = osm.ControlSet()
for sto in sto_list:
this_cset = osm.ControlSet(sto)
for i_control in range(this_cset.getSize()):
this_clin = osm.ControlLinear.safeDownCast(this_cset.get(i_control))
this_clin.setName(this_clin.getName() + ".excitation")
this_clin.setExtrapolate(False)
# Undo the default that makes sense for muscles.
this_clin.setDefaultParameterMin(-10000.0)
this_clin.setDefaultParameterMax(10000.0)
for i_param in range(this_clin.getNumParameters()):
this_clin.setControlValueMin(
this_clin.getParameterTime(i_param),
this_clin.getParameterValue(i_param))
this_clin.setControlValueMax(
this_clin.getParameterTime(i_param),
this_clin.getParameterValue(i_param))
cset.cloneAndAppend(this_clin)
return cset
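# Hedged usage sketch (added): builds a ControlSet from .sto files on disk;
# the file names are illustrative.
def _example_control_set():
    stos = [osm.Storage(p) for p in ('walk1_controls.sto', 'walk2_controls.sto')]
    cset = control_set_from_storage_files(stos)
    printobj(cset, 'combined_controls.xml')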
def storage2piecewise_linear_function(sto, column_name, scale_factor=None):
"""Returns a column from a storage file as an
org.opensim.modeling.PiecewiseLinearFunction. We advise that, once you get
the function, you name it.
Parameters
----------
sto : org.opensim.modeling.Storage
An OpenSim Storage object.
column_name : str
Name of a column in `sto`.
scale_factor : float, optional
Scale the column data before placing it in the function.
Returns
-------
plf : org.opensim.modeling.PiecewiseLinearFunction
Just like you asked for.
"""
time = osm.ArrayDouble()
sto.getTimeColumn(time)
state_index = sto.getStateIndex(column_name)
if type(scale_factor) == float:
sto.multiplyColumn(state_index, scale_factor)
elif scale_factor == None:
pass
else:
raise Exception('scale_factor, if specified, must be a float.')
ordinate = osm.ArrayDouble()
sto.getDataColumn(state_index, ordinate)
return osm.PiecewiseLinearFunction(time.getSize(), time.get(),
ordinate.get())
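# Hedged usage sketch (added): extracts one column of a states file as a
# function of time; the file and column names are made up.
def _example_plf():
    sto = osm.Storage('subject01_states.sto')
    plf = storage2piecewise_linear_function(sto, 'knee_angle_r', scale_factor=1.0)
    plf.setName('knee_angle_r')   # the docstring advises naming the function
    return plf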
twitch_ratios_2392 = {
'glut_med1': 0.55, 'glut_med2': 0.55, 'glut_med3': 0.55,
'glut_min1': 0.55, 'glut_min2': 0.55, 'glut_min3': 0.55,
'semimem': 0.4925, 'semiten': 0.425,
'bifemlh': 0.5425, 'bifemsh': 0.529,
'add_mag1': 0.552, 'add_mag2': 0.552, 'add_mag3': 0.552,
'add_mag4': 0.552, # Chris' change to Apoorva's model.
'glut_max1': 0.55, 'glut_max2': 0.55, 'glut_max3': 0.55,
'iliacus': 0.5, 'psoas': 0.5, 'rect_fem': 0.3865,
'vas_med': 0.503, 'vas_int': 0.543, 'vas_lat': 0.455,
'med_gas': 0.566, 'lat_gas': 0.507, 'soleus': 0.803,
'tib_post': 0.6, 'flex_dig': 0.6, 'flex_hal': 0.6, 'tib_ant': 0.7,
'per_brev': 0.6, 'per_long': 0.6, 'per_tert': 0.75,
'ext_dig': 0.75, 'ext_hal': 0.75,
'ercspn': 0.6, 'intobl': 0.56, 'extobl': 0.58,
'sar': -1, 'add_long': -1, 'add_brev': -1,
'tfl': -1, 'pect': -1, 'grac': -1,
'quad_fem': -1, 'gem': -1, 'peri': -1}
twitch_ratios_1018 = {
'hamstrings': 0.49, 'bifemsh': 0.53, 'glut_max': 0.55,
'iliopsoas': 0.50, 'rect_fem': 0.39, 'vasti': 0.50,
'gastroc': 0.54, 'soleus': 0.80,
'tib_ant': 0.70}
# For the muscles that are divided in OpenSim across multiple paths,
# divide the publis |
chubbymaggie/claripy | tests/test_balancer.py | Python | bsd-2-clause | 4,706 | 0.009137 | import claripy
def test_complex_guy():
guy_wide = claripy.widen(
claripy.union(
claripy.union(
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)),
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)) + claripy.BVV(1, 32)
),
claripy.union(
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)),
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)) + claripy.BVV(1, 32)
) + claripy.BVV(1, 32)
),
claripy.union(
claripy.union(
claripy.union(
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)),
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)) + claripy.BVV(1, 32)
),
claripy.union(
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)),
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)) + claripy.BVV(1, 32)
) + claripy.BVV(1, 32)
),
claripy.union(
claripy.union(
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)),
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)) + claripy.BVV(1, 32)
),
claripy.union(
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)),
claripy.union(claripy.BVV(0L, 32), claripy.BVV(1, 32)) + claripy.BVV(1, 32)
) + claripy.BVV(1, 32)
) + claripy.BVV(1, 32)
)
)
guy_inc = guy_wide + claripy.BVV(1, 32)
guy_zx = claripy.ZeroExt(32, guy_inc)
s,r = claripy.balancer.Balancer(claripy.backends.vsa, guy_inc <= claripy.BVV(39, 32)).compat_ret
assert s
assert r[0][0] is guy_wide
assert claripy.backends.vsa.min(r[0][1]) == 0
assert set(claripy.backends.vsa.eval(r[0][1], 1000)) == set([4294967295] + range(39))
s,r = claripy.balancer.Balancer(claripy.backends.vsa, guy_zx <= claripy.BVV(39, | 64)).compat_ret
assert r[0][0] is guy_wide
assert claripy.backends.vsa.min(r[0][1]) == 0
assert set(claripy.backends.vsa.eval(r[0][1], 1000)) == set([4294967295] + range(39))
def test_simple_guy():
x = claripy.BVS('x', 32)
s,r = claripy.balancer.Balancer(claripy.backends.vsa, x <= claripy.BVV(39, 32)).compat_ret
assert s
assert r[0][0] | is x
assert claripy.backends.vsa.min(r[0][1]) == 0
assert claripy.backends.vsa.max(r[0][1]) == 39
s,r = claripy.balancer.Balancer(claripy.backends.vsa, x + 1 <= claripy.BVV(39, 32)).compat_ret
assert s
assert r[0][0] is x
all_vals = r[0][1]._model_vsa.eval(1000)
assert len(all_vals)
assert min(all_vals) == 0
assert max(all_vals) == 4294967295
all_vals.remove(4294967295)
assert max(all_vals) == 38
def test_widened_guy():
w = claripy.widen(claripy.BVV(1, 32), claripy.BVV(0, 32))
s,r = claripy.balancer.Balancer(claripy.backends.vsa, w <= claripy.BVV(39, 32)).compat_ret
assert s
assert r[0][0] is w
assert claripy.backends.vsa.min(r[0][1]) == 0
assert claripy.backends.vsa.max(r[0][1]) == 1 # used to be 39, but that was a bug in the VSA widening
s,r = claripy.balancer.Balancer(claripy.backends.vsa, w + 1 <= claripy.BVV(39, 32)).compat_ret
assert s
assert r[0][0] is w
assert claripy.backends.vsa.min(r[0][1]) == 0
all_vals = r[0][1]._model_vsa.eval(1000)
assert set(all_vals) == set([4294967295, 0, 1])
def test_overflow():
x = claripy.BVS('x', 32)
print "x + 10 <= 20"
s,r = claripy.balancer.Balancer(claripy.backends.vsa, x + 10 <= claripy.BVV(20, 32)).compat_ret
#mn,mx = claripy.backends.vsa.min(r[0][1]), claripy.backends.vsa.max(r[0][1])
assert s
assert r[0][0] is x
assert set(claripy.backends.vsa.eval(r[0][1], 1000)) == set([4294967286, 4294967287, 4294967288, 4294967289, 4294967290, 4294967291, 4294967292, 4294967293, 4294967294, 4294967295, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
#print "0 <= x + 10"
#s,r = claripy.balancer.Balancer(claripy.backends.vsa, 0 <= x + 10).compat_ret
#assert s
#assert r[0][0] is x
print "x - 10 <= 20"
s,r = claripy.balancer.Balancer(claripy.backends.vsa, x - 10 <= claripy.BVV(20, 32)).compat_ret
assert s
assert r[0][0] is x
assert set(claripy.backends.vsa.eval(r[0][1], 1000)) == set(range(10, 31))
#print "0 <= x - 10"
#s,r = claripy.balancer.Balancer(claripy.backends.vsa, 0 <= x - 10).compat_ret
#assert s
#assert r[0][0] is x
if __name__ == '__main__':
test_overflow()
test_simple_guy()
test_widened_guy()
test_complex_guy()
|
edent/BMW-i-Remote | python/bmw.py | Python | mit | 4,929 | 0.009333 | #! /usr/bin/env python
#
# Use the BMW ConnectedDrive API using credentials from credentials.json
# You can see what should be in there by looking at credentials.json.sample.
#
# 'auth_basic' is the base64-encoded version of API key:API secret
# You can capture it if you can intercept the traffic from the app at
# the time when reauthentication is happening.
#
# Based on the excellent work by Terence Eden:
# https://github.com/edent/BMW-i-Remote
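#
# For illustration only (placeholder values, not real credentials), the
# 'auth_basic' header value could be built like this:
#
# import base64
# auth_basic = base64.b64encode("API_KEY:API_SECRET")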
import json
import requests
import time
# API Gateway
# North America: b2vapi.bmwgroup.us
# Rest of World: b2vapi.bmwgroup.com
# China: b2vapi.bmwgroup.cn:8592
SERVER = "b2vapi.bmwgroup.us"
AUTH_URL = 'https://' + SERVER + '/gcdm/oauth/token'
API_ROOT_URL = 'https://' + SERVER + '/webapi/v1'
# What are we pretending to be? Not sure if this is important.
# Might be tied to OAuth consumer (auth_basic) credentials?
USER_AGENT = "MCVApp/1.5.2 (iPhone; iOS 9.1; Scale/2.00)"
# USER_AGENT = "Dalvik/2.1.0 (Linux; U; Android 5.1.1; Nexus 6 Build/LMY48Y)"
# Constants
# To convert km to miles:
# miles = km * KM_TO_MILES
KM_TO_MILES = 0.621371
# To convert kWh/100Km to Miles/kWh:
# 1 / (EFFICIENCY * avgElectricConsumption)`
EFFICIENCY = 0.01609344
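# Hedged helper (added for illustration): applies the conversion above.
# e.g. at 20 kWh/100km, 1 / (0.01609344 * 20) ~= 3.11 miles/kWh.
def miles_per_kwh(avg_electric_consumption):
    return 1.0 / (EFFICIENCY * avg_electric_consumption)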
# For future use
class ConnectedDriveException(Exception):
pass
class ConnectedDrive(object):
"""
A wrapper for the BMW ConnectedDrive API used by mobile apps.
Caches credentials in credentials_file, so needs both read
and write access to it.
"""
def __init__(self, credentials_file='credentials.json'):
self.credentials_file = credentials_file
with open(self.credentials_file,"r") as cf:
credentials = json.load(cf)
self.username = credentials["username"]
self.password = credentials["password"]
self.auth_basic = credentials["auth_basic"]
self.access_token = credentials["access_token"]
self.token_expiry = credentials["token_expiry"]
# If the access_token has expired, generate a new one and use that
if (time.time() > self.token_expiry):
self.generateCredentials()
def generateCredentials(self):
"""
If previous token has expired, create a new one from basics.
"""
headers = {
"Authorization": "Basic " + self.auth_basic,
"Content-Type": "application/x-www-form-urlencoded",
"User-Agent": USER_AGENT
}
data = {
"grant_type": "password",
"username": self.username,
"password": self.password,
"scope": "remote_services vehicle_data"
}
r = requests.post(AUTH_URL, data=data, headers=headers)
json_data = r.json()
# Get the access token
self.access_token = json_data["access_token"]
self.token_expiry = time.time() + json_data["expires_in"]
self.saveCredentials()
def saveCredentials(self):
"""
Save current state to the JSON file.
"""
credentials = {
"username": self.username,
"password": self.password,
"auth_basic": self.auth_basic,
"access_token": self.access_token,
"token_expiry": self.token_expiry
}
# Open a file for writing
with open(self.credentials_file, "w") as credentials_file:
json.dump(credentials, credentials_file, indent=4)
def cal | l(self, path, post_data=None):
"""
Call the API at the given path.
Argument should be relative to the API base | URL, e.g:
print c.call('/user/vehicles/')
If a dictionary 'post_data' is specified, the request will be
a POST, otherwise a GET.
"""
#
if (time.time() > self.token_expiry):
self.generateCredentials()
headers = {"Authorization": "Bearer " + self.access_token,
"User-Agent":USER_AGENT}
if post_data is None:
r = requests.get(API_ROOT_URL + path, headers=headers)
else:
r = requests.post(API_ROOT_URL + path, headers=headers, data=post_data)
return r.json()
def executeService(self, vin, serviceType):
"""
Post a request for the specified service. e.g.
print c.executeService(vin, 'DOOR_LOCK')
"""
return self.call("/user/vehicles/{}/executeService".format(vin),
{'serviceType': serviceType})
# A simple test example
def main():
c = ConnectedDrive()
print "\nVehicle info"
resp = c.call('/user/vehicles/')
car = resp['vehicles'][0]
for k,v in car.items():
print " ",k, " : ", v
print "\nVehicle status"
status = c.call("/user/vehicles/{}/status".format(car['vin']))['vehicleStatus']
for k,v in status.items():
print " ", k, " : ", v
if __name__ == '__main__':
main()
|
emollient/WITRapp | witrapp/witrapp/__init__.py | Python | gpl-2.0 | 654 | 0 | from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
engine = engine_f | rom_config(settings, ' | sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
config = Configurator(settings=settings)
config.include('pyramid_mako')
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('all_songs_play', '/allsongs')
config.scan()
return config.make_wsgi_app()
|
jmazon/libanki | anki/importing/anki10.py | Python | gpl-3.0 | 3,247 | 0.000616 | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
"""\
Importing Anki 0.9+ decks
==========================
"""
__docformat__ = 'restructuredtext'
from anki import DeckStorage
from anki.importing import Importer
from anki.sync import SyncClient, SyncServer, copyLocalMedia
from anki.lang import _
from anki.utils import ids2str
from anki.deck import NEW_CARDS_RANDOM
import time
class Anki10Importer(Importer):
needMapper = False
def doImport(self):
"Import."
random = self.deck.newCardOrder == NEW_CARDS_RANDOM
num = 4
if random:
num += 1
self.deck.startProgress(num)
self.deck.updateProgress(_("Importing..."))
src = DeckStorage.Deck(self.file)
client = SyncClient(self.deck)
server = SyncServer(src)
client.setServer(server)
# if there is a conflict, sync local -> src
client.localTime = self.deck.modified
client.remoteTime = 0
src.s.execute("update facts set modified = 1")
src.s.execute("update models set modified = 1")
src.s.execute("update cards set modified = 1")
src.s.execute("update media set created = 1")
self.deck.s.flush()
# set up a custom change list and sync
lsum = client.summary(0)
self._clearDeleted(lsum)
rsum = server.summary(0)
self._clearDeleted(rsum)
payload = client.genPayload((lsum, rsum))
# no need to add anything to src
payload['added-models'] = []
payload['added-cards'] = []
payload['added-facts'] = {'facts': [], 'fields': []}
assert payload['deleted-facts'] == []
assert payload['deleted-cards'] == []
assert payload['deleted-models'] == []
self.deck.updateProgress()
res = server.applyPayload(payload)
self.deck.updateProgress()
client.applyPayloadReply(res)
copyLocalMedia(server.deck, client.deck)
# add tags
self.deck.updateProgress()
fids = [f[0] for f in res['added-facts']['facts']]
self.deck.addTags(fids, self.tagsToAdd)
# mark import material as newly added
self.deck.s.statement(
"update cards set modified = :t where id in %s" %
ids2str([x[0] for x in res['added-cards']]), t=time.time())
self.deck.s.statement(
"update facts set modified = :t where id | in %s" %
ids2str([x[0] for x in res['added-facts']['facts']]), t=time.time())
self.deck.s.statement(
"update models set modified = :t where id in %s" %
ids2str([x['id'] for x in res['added-models']]), t=time.time())
# update total and refresh
self.total = len(res['added-facts']['facts'])
src.s.rollback()
src.engine.dispose()
| # randomize?
if random:
self.deck.updateProgress()
self.deck.randomizeNewCards([x[0] for x in res['added-cards']])
self.deck.flushMod()
self.deck.finishProgress()
def _clearDeleted(self, sum):
sum['delcards'] = []
sum['delfacts'] = []
sum['delmodels'] = []
|
sdpython/ensae_teaching_cs | src/ensae_teaching_cs/data/gutenberg.py | Python | mit | 1,949 | 0.000513 | # -*- coding: utf-8 -*-
"""
@file
@brief Link to data from `Gutenberg <http://www.gutenberg.org/>`_,
provides an automated way to get the data from this website.
Some data may be replicated here to unit test notebooks.
"""
import os
import urllib.request
from urllib.error import URLError
def gutenberg_name(name="condamne", local=False, load=False):
"""
Retrieves data from `Gutenberg <http://www.gutenberg.org/>`_.
@param name name of the requested data
@param local use local version
@param load load the data
@return content or filename or url
List of available datasets:
* ``condamne``: `Le dernier jour d'un condamné <http://www.gutenberg.org/ebooks/6838>`_, Victor Hugo
"""
this = os.path.abspath(os.path.dirname(__file__))
data = os.path.join(this, "data_gutenberg")
if name == "condamne":
url = "http://www.gutenberg.org/cache/epub/6838/pg6838.txt"
loc = os.path.join(data, "pg6838.txt")
if load:
if not local:
try:
with urllib.request.urlopen(url) as u:
| text = u.read()
u.close()
except URLError:
# we switch to local
text = None
if text is not None:
text = text.decode("utf8")
return text
if not os.path.exists(loc):
raise FileNotFoundErr | or(loc)
with open(loc, "r", encoding="utf8") as f:
text = f.read()
return text
else:
if local:
if not os.path.exists(loc):
raise FileNotFoundError(loc)
return loc
else:
return url
else:
raise ValueError(
"unknown name '{0}', check the code of the function".format(name))
|
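# Hedged usage sketch (added): fetches the full text (remote first, with the
# function's own local fallback) and counts its lines.
def _example_gutenberg():
    text = gutenberg_name(name="condamne", load=True)
    return len(text.splitlines())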
ongame-entretenimento/cadastro | usuario/migrations/0004_usuario_hash.py | Python | mit | 446 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
| dependencies = [
('usuario', '000 | 3_auto_20140909_1051'),
]
operations = [
migrations.AddField(
model_name='usuario',
name='hash',
field=models.CharField(default=0, max_length=32),
preserve_default=False,
),
]
|
sandvine/horizon | openstack_dashboard/urls.py | Python | apache-2.0 | 1,914 | 0 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file excep | t in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable l | aw or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static # noqa
from django.conf.urls import url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
from django.views import defaults
import horizon
from openstack_dashboard.api import rest
from openstack_dashboard import views
urlpatterns = [
url(r'^$', views.splash, name='splash'),
url(r'^api/', include(rest.urls)),
url(r'', include(horizon.urls)),
]
for u in getattr(settings, 'AUTHENTICATION_URLS', ['openstack_auth.urls']):
urlpatterns.append(url(r'^auth/', include(u)))
# Development static app and project media serving using the staticfiles app.
urlpatterns += staticfiles_urlpatterns()
# Convenience function for serving user-uploaded media during
# development. Only active if DEBUG==True and the URL prefix is a local
# path. Production media should NOT be served by Django.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns.append(url(r'^500/$', defaults.server_error))
|
asedunov/intellij-community | python/testData/copyPaste/EmptyFunctionCaretAtNoneIndent.src.py | Python | apache-2.0 | 153 | 0.052288 | class C:
def empty(s | elf):
def another(self):
<selection> """
line 1
line 2
"""
pass
</selection> | |
victorwestmann/wttd | eventex/subscriptions/tests.py | Python | lgpl-2.1 | 1,211 | 0.024773 | # coding: utf-8
from django.test import TestCase
from eventex.subscriptions.forms import SubscriptionForm
class SubscribeTest(TestCase):
def setUp(self):
self.resp = self.client.get('/inscricao/')
def test_get(self):
'GET /inscricao/ must return status code 200.'
self.assertEqual(200, self.resp.status_code)
def test_template(self):
'Response should be a rendered template.'
self.assertTem | plateUsed(self.resp,
'subscriptions/subscription_form.html')
def test_html(self):
'Html must contain input controls.'
self.assertContains(self.resp, '<form')
self.assertContains(self.resp, '<input', 6)
self.assertContains(self.resp, 'type="text"', 3)
self. | assertContains(self.resp, 'type="email"')
self.assertContains(self.resp, 'type="submit"')
def test_csrf(self):
'Html must contain csrf token.'
self.assertContains(self.resp, 'csrfmiddlewaretoken')
def test_has_form(self):
'Context must have the subscription form.'
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
def test_form_has_fields(self):
'Form must have 4 fields.'
form = self.resp.context['form']
self.assertItemsEqual(['name', 'email', 'cpf', 'phone'], form.fields) |
LumaPictures/rez | src/rez/serialise.py | Python | lgpl-3.0 | 11,794 | 0.000933 | """
Read and write data from file. File caching via a memcached server is supported.
"""
from rez.package_resources_ import package_rex_keys
from rez.utils.scope import ScopeContext
from rez.utils.sourcecode import SourceCode, early, late, include
from rez.utils.logging_ i | mport print_debug
from rez.utils.fil | esystem import TempDirs
from rez.exceptions import ResourceError, InvalidPackageError
from rez.utils.memcached import memcached
from rez.utils.syspath import add_sys_paths
from rez.config import config
from rez.vendor.enum import Enum
from rez.vendor import yaml
from contextlib import contextmanager
from inspect import isfunction, ismodule, getargspec
from StringIO import StringIO
import sys
import os
import os.path
tmpdir_manager = TempDirs(config.tmpdir, prefix="rez_write_")
file_cache = {}
class FileFormat(Enum):
py = ("py",)
yaml = ("yaml",)
txt = ("txt",)
__order__ = "py,yaml,txt"
def __init__(self, extension):
self.extension = extension
@contextmanager
def open_file_for_write(filepath):
"""Writes both to given filepath, and tmpdir location.
This is to get around the problem with some NFS's where immediately reading
a file that has just been written is problematic. Instead, any files that we
write, we also write to /tmp, and reads of these files are redirected there.
"""
stream = StringIO()
yield stream
content = stream.getvalue()
filepath = os.path.realpath(filepath)
tmpdir = tmpdir_manager.mkdtemp()
cache_filepath = os.path.join(tmpdir, os.path.basename(filepath))
with open(filepath, 'w') as f:
f.write(content)
with open(cache_filepath, 'w') as f:
f.write(content)
file_cache[filepath] = cache_filepath
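# Hedged usage sketch (added): the path below is illustrative. The context
# manager buffers writes in memory, then mirrors the content to /tmp so later
# reads in this process bypass NFS write-then-read staleness.
def _example_write():
    with open_file_for_write("/packages/foo/1.0.0/package.py") as f:
        f.write("name = 'foo'\n")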
def load_from_file(filepath, format_=FileFormat.py, update_data_callback=None):
"""Load data from a file.
Note:
Any functions from a .py file will be converted to `SourceCode` objects.
Args:
filepath (str): File to load.
format_ (`FileFormat`): Format of file contents.
update_data_callback (callable): Used to change data before it is
returned or cached.
Returns:
dict.
"""
filepath = os.path.realpath(filepath)
cache_filepath = file_cache.get(filepath)
if cache_filepath:
# file has been written by this process, read it from /tmp to avoid
# potential write-then-read issues over NFS
return _load_file(filepath=cache_filepath,
format_=format_,
update_data_callback=update_data_callback)
else:
return _load_from_file(filepath=filepath,
format_=format_,
update_data_callback=update_data_callback)
def _load_from_file__key(filepath, format_, update_data_callback):
st = os.stat(filepath)
if update_data_callback is None:
callback_key = 'None'
else:
callback_key = getattr(update_data_callback, "__name__", "None")
return str(("package_file", filepath, str(format_), callback_key,
st.st_ino, st.st_mtime))
@memcached(servers=config.memcached_uri if config.cache_package_files else None,
min_compress_len=config.memcached_package_file_min_compress_len,
key=_load_from_file__key,
debug=config.debug_memcache)
def _load_from_file(filepath, format_, update_data_callback):
return _load_file(filepath, format_, update_data_callback)
def _load_file(filepath, format_, update_data_callback):
load_func = load_functions[format_]
if config.debug("file_loads"):
print_debug("Loading file: %s" % filepath)
with open(filepath) as f:
result = load_func(f, filepath=filepath)
if update_data_callback:
result = update_data_callback(format_, result)
return result
def load_py(stream, filepath=None):
"""Load python-formatted data from a stream.
Args:
stream (file-like object).
Returns:
dict.
"""
scopes = ScopeContext()
g = dict(scope=scopes,
early=early,
late=late,
include=include,
InvalidPackageError=InvalidPackageError)
try:
exec stream in g
except Exception as e:
import traceback
frames = traceback.extract_tb(sys.exc_info()[2])
while filepath and frames and frames[0][0] != filepath:
frames = frames[1:]
msg = "Problem loading %s: %s" % (filepath, str(e))
stack = ''.join(traceback.format_list(frames)).strip()
if stack:
msg += ":\n" + stack
raise ResourceError(msg)
result = {}
excludes = set(('scope', 'InvalidPackageError', '__builtins__',
'early', 'late', 'include'))
for k, v in g.iteritems():
if k not in excludes and \
(k not in __builtins__ or __builtins__[k] != v):
result[k] = v
result.update(scopes.to_dict())
result = process_python_objects(result, filepath=filepath)
return result
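# Minimal load_py() usage sketch (editor's addition): parse a package.py-style
# definition from an on-disk file. Plain values come back in the dict; any
# functions would have been wrapped by process_python_objects(). 'py_path' is
# a hypothetical file containing e.g. name = 'foo'.
def _example_load_py(py_path):
    with open(py_path) as f:
        data = load_py(f, filepath=py_path)
    return data.get("name")  # 'foo'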
class EarlyThis(object):
"""The 'this' object for @early bound functions."""
def __init__(self, data):
self._data = data
def __getattr__(self, attr):
missing = object()
value = self._data.get(attr, missing)
if value is missing:
raise AttributeError("No such package attribute '%s'" % attr)
if isfunction(value) and (hasattr(value, "_early") or hasattr(value, "_late")):
raise ValueError(
"An early binding function cannot refer to another early or "
"late binding function: '%s'" % attr)
return value
def process_python_objects(data, filepath=None):
"""Replace certain values in the given package data dict.
Does things like:
* evaluates @early decorated functions, and replaces with return value;
* converts functions into `SourceCode` instances so they can be serialized
out to installed packages, and evaluated later;
* strips some values (modules, __-leading variables) that are never to be
part of installed packages.
Returns:
dict: Updated dict.
"""
def _process(value):
if isinstance(value, dict):
for k, v in value.items():
value[k] = _process(v)
return value
elif isfunction(value):
func = value
if hasattr(func, "_early"):
# run the function now, and replace with return value
#
# make a copy of the func with its own globals, and add 'this'
import types
fn = types.FunctionType(func.func_code,
func.func_globals.copy(),
name=func.func_name,
argdefs=func.func_defaults,
closure=func.func_closure)
this = EarlyThis(data)
fn.func_globals.update({"this": this})
with add_sys_paths(config.package_definition_build_python_paths):
                # this 'data' arg support isn't needed anymore, but I'm
                # supporting it until I know nobody is using it...
#
spec = getargspec(func)
args = spec.args or []
if len(args) not in (0, 1):
raise ResourceError("@early decorated function must "
"take zero or one args only")
if args:
value_ = fn(data)
else:
value_ = fn()
# process again in case this is a function returning a function
return _process(value_)
elif hasattr(func, "_late"):
return SourceCode(func=func, filepath=filepath,
eval_as_function=True)
elif func.__name__ in package_rex_keys:
# if a rex function, the code has to be eval'd NOT as a f |
hivesolutions/appier | src/appier/test/model.py | Python | apache-2.0 | 28,833 | 0.006903 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2021 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2021 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import unittest
import appier
from . import mock
class ModelTest(unittest.TestCase):
def setUp(self):
self.app = appier.App()
self.app._register_models_m(mock, "Mocks")
def tearDown(self):
self.app.unload()
adapter = appier.get_adapter()
adapter.drop_db()
def test_basic(self):
person = mock.Person(fill = False)
person.name = "Name"
self.assertEqual(person.name, "Name")
self.assertEqual(person["name"], "Name")
self.assertEqual(len(person), 1)
person["age"] = 20
self.assertEqual(person.age, 20)
self.assertEqual(person["age"], 20)
self.assertEqual(len(person), 2)
self.assertEqual("age" in person, True)
self.assertEqual("boss" in person, False)
self.assertEqual(bool(person), True)
del person["name"]
self.assertRaises(AttributeError, lambda: person.name)
self.assertRaises(KeyError, lambda: person["name"])
del person.age
self.assertRaises(AttributeError, lambda: person.age)
self.assertRaises(KeyError, lambda: person["age"])
self.assertEqual(bool(person), False)
def test_find(self):
result = mock.Person.find(age | = 1)
self.assertEqual(len(result), 0)
person = mock.Person()
person.age = 1
person.name = "Name"
person.save()
result = mock.Person.find(age = 1)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].age, 1)
def test_count(self):
result = mock.Person.c | ount()
self.assertEqual(result, 0)
person = mock.Person()
person.age = 1
person.name = "Name"
person.save()
result = mock.Person.count()
self.assertEqual(result, 1)
def test_delete(self):
result = mock.Person.count()
self.assertEqual(result, 0)
person = mock.Person()
person.age = 1
person.name = "Name"
person.save()
result = mock.Person.count()
self.assertEqual(result, 1)
person.delete()
result = mock.Person.count()
self.assertEqual(result, 0)
def test_advance(self):
result = mock.Person.count()
self.assertEqual(result, 0)
person = mock.Person()
person.age = 1
person.name = "Name"
person.save()
result = mock.Person.count()
self.assertEqual(result, 1)
result = person.advance("age")
self.assertEqual(result, 2)
self.assertEqual(person.age, 2)
person = person.reload()
self.assertEqual(person.age, 2)
result = person.advance("age", delta = 2)
self.assertEqual(result, 4)
self.assertEqual(person.age, 4)
person = person.reload()
self.assertEqual(person.age, 4)
result = person.advance("age", delta = -2)
self.assertEqual(result, 2)
self.assertEqual(person.age, 2)
person = person.reload()
self.assertEqual(person.age, 2)
def test_validation(self):
person = mock.Person()
self.assertRaises(appier.ValidationError, person.save)
person = mock.Person()
person.name = "Name"
person.save()
person = mock.Person()
person.name = "Name"
self.assertRaises(appier.ValidationError, person.save)
def test_map(self):
person = mock.Person()
person.name = "Name"
self.assertEqual(person.name, "Name")
person.save()
self.assertEqual(person.identifier, 1)
self.assertEqual(person.identifier_safe, 1)
self.assertEqual(person.name, "Name")
person_m = person.map()
self.assertEqual(isinstance(person_m, dict), True)
self.assertEqual(person_m["identifier"], 1)
self.assertEqual(person_m["identifier_safe"], 1)
self.assertEqual(person_m["name"], "Name")
person.age = 20
person.hidden = "Hidden"
self.assertEqual(person.age, 20)
self.assertEqual(person.hidden, "Hidden")
person_m = person.map(all = True)
self.assertEqual(isinstance(person_m, dict), True)
self.assertEqual(person_m["identifier"], 1)
self.assertEqual(person_m["identifier_safe"], 1)
self.assertEqual(person_m["name"], "Name")
self.assertEqual(person_m["age"], 20)
self.assertEqual(person_m["hidden"], "Hidden")
cat = mock.Cat()
cat.name = "NameCat"
self.assertEqual(cat.name, "NameCat")
cat.save()
self.assertEqual(cat.identifier, 1)
person.cats = [cat]
person.save()
person_m = person.map(resolve = True, all = True)
self.assertEqual(isinstance(person_m, dict), True)
self.assertEqual(isinstance(person_m["cats"], list), True)
self.assertEqual(isinstance(person_m["cats"][0], dict), True)
self.assertEqual(person_m["cats"][0]["identifier"], 1)
self.assertEqual(person_m["cats"][0]["identifier_safe"], 1)
self.assertEqual(person_m["cats"][0]["name"], "NameCat")
person = mock.Person.get(identifier = 1)
self.assertEqual(person.cats[0].name, "NameCat")
person = mock.Person.get(identifier = 1)
person_m = person.map(all = True)
self.assertEqual(person_m["cats"][0], 1)
person_m = person.map(resolve = True, all = True)
self.assertEqual(isinstance(person_m, dict), True)
self.assertEqual(isinstance(person_m["cats"], list), True)
self.assertEqual(isinstance(person_m["cats"][0], dict), True)
self.assertEqual(person_m["cats"][0]["identifier"], 1)
self.assertEqual(person_m["cats"][0]["identifier_safe"], 1)
self.assertEqual(person_m["cats"][0]["name"], "NameCat")
def test_increment(self):
person = mock.Person()
person.name = "Name1"
person.save()
self.assertEqual(person.identifier, 1)
self.assertEqual(person.name, "Name1")
person = mock.Person()
person.name = "Name2"
person.save()
self.assertEqual(person.identifier, 2)
self.assertEqual(person.name, "Name2")
person = mock.Person()
person.name = "Name3"
person.save()
self.assertEqual(person.identifier, 3)
self.assertEqual(person.name, "Name3")
person.delete()
person = mock.Person()
person.name = "Name4"
person.save()
self.assertEqual(person.identifier, 4)
self.assertEqual(person.name, "Name4")
def test_ensure_m |
Kortemme-Lab/kddg | .test/settings.py | Python | mit | 262 | 0 | #!/usr/bin/python2
"""
.te | st/settings.py
A simple test script for the settings module.
Created by Shane O'Connor 2016.
"""
import kddg.api.settings as settings
sys_settings = settings.load()
print(s | ys_settings.database)
print(sys_settings.database.username)
|
huqa/pyfibot_modules | module_horo.py | Python | mit | 933 | 0.00431 | # -*- coding: utf-8 -*-
"""
Fetches horoscopes from iltalehti.fi
Created on Oct 17, 2012
@author: huqa / pikkuhukka@gmail.com
"""
import re
horo_url = "http://www.iltalehti.fi/horoskooppi"
def command_horo(bot, user, channel, args):
"""Hakee päivittäisen horoskoopin. Käyttö !horo | <horoskooppimerkki>"""
nick = getNick(user)
if not args:
return bot.say(channel, "lähe ny pelle menee %s" % nick)
haku = args.decode('iso-8859-1')
haku = haku.title()
soup = getUrl(horo_url).getBS()
merkki = None
for m in soup.findAll("div", "valiotsikko"):
if m.find(text=re.compile(haku+"*")):
merkki = m.find(text=re.compile(haku+"*"))
| break
if not merkki:
return bot.say(channel, "opettele ny kirjottaan kevyt pelle %s" % nick)
tekstit = merkki.next.contents[0]
bot.say(channel, "%s %s" % (str(merkki), str(tekstit)))
|
francisrod01/wrangling_mongodb | lesson 6/name.py | Python | mit | 1,880 | 0.000532 | #!~/envs/udacity_python3_mongodb
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.
In the previous quiz you recognized that the "name" value can be an array (or
list in Python terms). It would make it easier to process and query the data
later if all values for the name are in a Python list, instead of being
just a string separated with special characters, like now.
Finish the function fix_name(). It will receive a string as an input, and it
will return a list of all the names. If there is only one name, the list will
have only one item in it; if the name is "NULL", the list should be empty.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import pprint
import re
DIR_DATA = 'datasets/'
CITIES_DATA = DIR_DATA + 'cities.csv'
def fix_name(name):
if re.match(r'^{', name):
values = name.strip('{,}').split('|')
return values
elif name == 'NULL':
return []
else:
return [name]
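# Editor's demo (mirroring the assertions in test() below) of the three input
# shapes fix_name() handles:
def demo_fix_name():
    assert fix_name("{Negtemiut|Nightmute}") == ['Negtemiut', 'Nightmute']
    assert fix_name("Kumhari") == ['Kumhari']
    assert fix_name("NULL") == []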
def process_file(filename):
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
# Skipping the extra metadata
for i in range(3):
l = reader.__next__()
# Processing file
for line in reader:
# Calling your function to fix the area value
if "name" in line:
| line["name"] = fix_name(line["name"])
data.append(line)
return data
def test():
data = process_file(CITIES_DATA)
print("Printing 20 results:")
for n in range(20):
pprint.pprint(data[n]["name" | ])
assert data[14]["name"] == ['Negtemiut', 'Nightmute']
assert data[9]["name"] == ['Pell City Alabama']
assert data[3]["name"] == ['Kumhari']
if __name__ == "__main__":
test()
|
vlki/spacewalk-xmlrpc-tests | tests/xmlrpc_scripts/frontend/errata.clone.py | Python | bsd-3-clause | 1,293 | 0.003867 | #!/usr/bin/env python
# Copyright (c) 2011, Jan Vlcek
# All rights reserved.
#
# For further information see enclosed LICENSE file
#
# Python script calling the XML-RPC errata.clone of local Spacewalk server.
#
# Usage:
# ./errata.clone.py session-key channel-label [adv-name [adv-name [...]]]
#
# Author: Jan Vlcek <xvlcek03@stud.fit.vutbr.cz>
#
import sys
import xmlrpclib
import getopt
from frontend import client, spacewalkLogin, spacewalkPassword
def main(argv):
"""
The main function called when this script is executed.
"""
if len(argv) < 2:
usage()
sys.exit(2)
sessionKey = argv[0]
channelLabel = argv[1]
advisoryNames = argv[2:]
errata = c | lient.errata.clone(sessionKey, channelLabel, advisoryNames)
for erratum in errata:
| print(erratum['advisory_name'])
def usage():
"""
Prints the usage information.
"""
print("Python script calling the XML-RPC errata.clone")
print("of Spacewalk frontend API.")
print("")
print("Cloned erratum advisory names are printed by line")
print("")
print("Usage:")
print(" ./errata.clone.py session-key channel-label")
print(" [adv-name [adv-name [...]]]")
print("")
if __name__ == "__main__":
main(sys.argv[1:])
|
hastexo/hastexo-xblock | hastexo/provider.py | Python | agpl-3.0 | 27,375 | 0 | import time
import base64
import binascii
import hashlib
import logging
import paramiko
import random
import string
import yaml
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from heatclient.exc import HTTPException, HTTPNotFound
from keystoneauth1.exceptions.http import HttpError
from novaclient.exceptions import ClientException
from googleapiclient.errors import Error as GcloudApiError
from googleapiclient.errors import HttpError as GcloudApiHttpError
from .common import (
b,
get_xblock_settings,
IN_PROGRESS,
FAILED,
CREATE_COMPLETE,
DELETE_COMPLETE,
DELETE_IN_PROGRESS,
RESUME_COMPLETE,
RESUME_IN_PROGRESS,
SUSPEND_COMPLETE,
SUSPEND_IN_PROGRESS
)
from | .op | enstack import HeatWrapper, NovaWrapper
from .gcloud import GcloudDeploymentManager, GcloudComputeEngine
class ProviderException(Exception):
pass
class Provider(object):
"""
Base class for provider drivers.
"""
default_credentials = None
credentials = None
name = None
capacity = None
template = None
environment = None
sleep_seconds = None
@staticmethod
def init(name):
settings = get_xblock_settings()
sleep_seconds = settings.get("sleep_timeout", 10)
providers = settings.get("providers")
config = providers.get(name)
if config and isinstance(config, dict):
provider_type = config.get("type")
if provider_type == "openstack" or not provider_type:
return OpenstackProvider(name, config, sleep_seconds)
elif provider_type == "gcloud":
return GcloudProvider(name, config, sleep_seconds)
def __init__(self, name, config, sleep):
self.name = name
self.sleep_seconds = sleep
self.reset_logger()
# Get credentials
if config and isinstance(config, dict):
credentials = {}
for key, default in self.default_credentials.items():
credentials[key] = config.get(key, default)
self.credentials = credentials
else:
error_msg = ("No configuration provided for provider %s" %
self.name)
raise ProviderException(error_msg)
def set_logger(self, logger):
"""Set a logger other than the standard one.
This is meant to be used from Celery tasks, which usually
would want to use their task logger for logging.
"""
self.logger = logger
def reset_logger(self):
"""Reset the logger back to the standard one."""
self.logger = logging.getLogger(__name__)
def set_capacity(self, capacity):
if capacity in (None, "None"):
capacity = -1
else:
try:
capacity = int(capacity)
except (TypeError, ValueError):
# Invalid capacity: disable the provider
capacity = 0
self.capacity = capacity
def set_template(self, template):
if not template:
error_msg = ("No template provided for provider %s" % self.name)
raise ProviderException(error_msg)
self.template = template
def set_environment(self, environment):
if not environment:
error_msg = ("No environment provided for provider %s" % self.name)
raise ProviderException(error_msg)
self.environment = environment
def sleep(self):
time.sleep(self.sleep_seconds)
def generate_key_pair(self, encodeb64=False):
keypair = {}
pkey = paramiko.RSAKey.generate(1024)
keypair["public_key"] = pkey.get_base64()
s = StringIO()
pkey.write_private_key(s)
k = s.getvalue()
s.close()
if encodeb64:
k = base64.b64encode(b(k))
keypair["private_key"] = k
return keypair
def generate_random_password(self, length):
abc = string.ascii_lowercase
return "".join(random.choice(abc) for i in range(length))
def get_stacks(self):
raise NotImplementedError()
def get_stack(self):
raise NotImplementedError()
def create_stack(self):
raise NotImplementedError()
def delete_stack(self):
raise NotImplementedError()
def suspend_stack(self):
raise NotImplementedError()
def resume_stack(self):
raise NotImplementedError()
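# Editor's usage sketch (not part of the driver code): how the factory and
# helpers above are typically exercised. Assumes the XBlock settings define a
# hypothetical provider named "default" of type "openstack".
def _example_provider_usage():
    provider = Provider.init("default")     # returns an OpenstackProvider
    provider.set_capacity("20")             # invalid values disable it (0)
    keypair = provider.generate_key_pair()  # {'public_key': ..., 'private_key': ...}
    return provider, keypair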
class OpenstackProvider(Provider):
"""
OpenStack provider driver.
"""
default_credentials = {
"os_auth_url": "",
"os_auth_token": "",
"os_username": "",
"os_password": "",
"os_user_id": "",
"os_user_domain_id": "",
"os_user_domain_name": "",
"os_project_id": "",
"os_project_name": "",
"os_project_domain_id": "",
"os_project_domain_name": "",
"os_region_name": ""
}
heat_c = None
nova_c = None
def __init__(self, provider, config, sleep):
super(OpenstackProvider, self).__init__(provider, config, sleep)
self.heat_c = self._get_heat_client()
self.nova_c = self._get_nova_client()
def _get_heat_client(self):
return HeatWrapper(**self.credentials).get_client()
def _get_nova_client(self):
return NovaWrapper(**self.credentials).get_client()
def _get_stack_outputs(self, heat_stack):
outputs = {}
for o in getattr(heat_stack, 'outputs', []):
output_key = o["output_key"]
output_value = o["output_value"]
outputs[output_key] = output_value
return outputs
def get_stacks(self):
stacks = []
try:
heat_stacks = self.heat_c.stacks.list()
except HTTPNotFound:
return stacks
except (HTTPException, HttpError) as e:
raise ProviderException(e)
if heat_stacks:
for heat_stack in heat_stacks:
stack = {
"name": heat_stack.stack_name,
"status": heat_stack.stack_status
}
stacks.append(stack)
return stacks
def get_stack(self, name):
try:
self.logger.debug('Fetching information on '
'OpenStack Heat stack [%s]' % name)
heat_stack = self.heat_c.stacks.get(stack_id=name)
except HTTPNotFound:
status = DELETE_COMPLETE
outputs = {}
except (HTTPException, HttpError) as e:
raise ProviderException(e)
else:
status = heat_stack.stack_status
outputs = self._get_stack_outputs(heat_stack)
return {"status": status,
"outputs": outputs}
def create_stack(self, name, run):
if not self.template:
raise ProviderException("Template not set for provider %s." %
self.name)
try:
self.logger.info('Creating OpenStack Heat stack [%s]' % name)
res = self.heat_c.stacks.create(
stack_name=name,
template=self.template,
environment=self.environment,
parameters={'run': run}
)
except (HTTPException, HttpError) as e:
raise ProviderException(e)
stack_id = res['stack']['id']
# Sleep to avoid throttling.
self.sleep()
try:
heat_stack = self.heat_c.stacks.get(stack_id=stack_id)
except (HTTPException, HttpError) as e:
raise ProviderException(e)
status = heat_stack.stack_status
# Wait for stack creation
while IN_PROGRESS in status:
self.sleep()
try:
heat_stack = self.heat_c.stacks.get(stack_id=heat_stack.id)
except HTTPNotFound:
raise ProviderException("OpenStack Heat stack "
"disappeared during creation.")
except (HTTPException, HttpError) as e:
raise ProviderException(e)
status = heat_stack.stack_statu |
Yordan92/Pac-man-multiplayer | Ghosts.py | Python | gpl-3.0 | 5,718 | 0.047919 | from MakeGraph import MakeGraph
from Moving_pacman import PacMan
import pygame
class Ghost(MakeGraph):
index = 0
def __init__(self,class_graph,x,y):
Ghost.index = Ghost.index + 1
self.all_nodes = class_graph.get_nodes()
self.paths_to_all_nodes = class_graph.get_shortest_path()
self.path = []
self.hunting = False
self.name_image_u = "Ghost_red_up"
self.name_image_d = "Ghost_red_down"
self.name_image_l = "Ghost_red_left"
self.name_image_r = "Ghost_red_right"
self.name_image = self.name_image_u
self.cords={'x': x, 'y': y}
# {'x': 92, 'y': 161}
self.index = Ghost.index
def next_hop(self):
if self.path:
return self.path[0]
return []
def find_ghost_cords(self):
ghost_x = int(self.cords['y']/23)
ghost_y = int(self.cords['x']/23)
return (ghost_x,ghost_y)
def get_pictures(self):
        # Ghost.index is incremented before being assigned, so instance
        # indices run 1-4, not 0-3; the original 0-based checks shifted every
        # ghost one palette over (the fourth fell back to the red defaults).
        if self.index == 1:
            self.name_image_u = "Ghost_red_up"
            self.name_image_d = "Ghost_red_down"
            self.name_image_l = "Ghost_red_left"
            self.name_image_r = "Ghost_red_right"
        if self.index == 2:
            self.name_image_u = "Ghost_orange_up"
            self.name_image_d = "Ghost_orange_down"
            self.name_image_l = "Ghost_orange_left"
            self.name_image_r = "Ghost_orange_right"
        if self.index == 3:
            self.name_image_u = "Ghost_pink_up"
            self.name_image_d = "Ghost_pink_down"
            self.name_image_l = "Ghost_pink_left"
            self.name_image_r = "Ghost_pink_right"
        if self.index == 4:
            self.name_image_u = "Ghost_cyan_up"
            self.name_image_d = "Ghost_cyan_down"
            self.name_image_l = "Ghost_cyan_left"
            self.name_image_r = "Ghost_cyan_right"
def find_closest_nodes(self):
closest_nodes =[]
ghost_x = int(self.cords['x']/23)
ghost_y = int(self.cords['y']/23)
vertex = (ghost_y,ghost_x)
queue = [vertex]
Visited = [vertex]
# if vertex in all_Nodes:
# all_Nodes.remove(vertex)
while queue != []:
new_v = queue.pop(0)
new_v_adj = [(new_v[0] - 1, new_v[1]),
(new_v[0] + 1, new_v[1]),
(new_v[0], new_v[1] - 1),
(new_v[0], new_v[1] + 1)]
for v_adj in new_v_adj:
if self.is_p_vertex(v_adj) and v_adj not in Visited:
if v_adj in self.all_nodes:
closest_nodes.append((v_adj[1],v_adj[0]))
else:
queue.append(v_adj)
Visited.append(v_adj)
return closest_nodes
def find_closest_vertex(self):
closest_nodes =[]
ghost_x = int(self.cords['x']/23)
ghost_y = int(self.cords['y']/23)
vertex = (ghost_y,ghost_x)
queue = [vertex]
map_to_a_vertex = {}
visited_n = [vertex]
# print (self.all_nodes)
if vertex in self.all_nodes:
return []
while queue != []:
new_v = queue.pop(0)
new_v_adj = [(new_v[0] - 1, new_v[1]),
(new_v[0] + 1, new_v[1]),
(new_v[0], new_v[1] - 1),
(new_v[0], new_v[1] + 1)]
for v_adj in new_v_adj:
map_to_a_vertex[v_adj] = new_v
if v_adj in self.all_nodes:
full_path = [v_adj]
while map_to_a_vertex[v_adj] != vertex:
v_adj = map_to_a_vertex[v_adj]
full_path.insert(0,v_adj)
return full_path
if MakeGraph.is_p_vertex(self,v_adj) and v_adj not in visited_n:
queue.append(v_adj)
visited_n.append(v_adj)
def ghost_move(self, pacman_vertex, pacman_cords):
my_cords = | (int(self.cords['y']/23),int(self.cords['x']/23))
if my_cords == pacman_vertex:
self.hunting = True
if self.hu | nting == True:
self.path = self.search_eat(pacman_cords)
if not self.path:
if self.hunting == True:
self.hunting = False
if self.find_closest_vertex() != []:
self.path = self.find_closest_vertex()
else:
for i in self.paths_to_all_nodes[my_cords][pacman_vertex]:
self.path.extend(2*[i])
def ghost_make_move(self):
# if not self.path:
# self.ghost_move(screen,pacman_vertex,pacman_cords)
new_step = self.path.pop(0)
        old_step = (int(self.cords['y'] / 23), int(self.cords['x'] / 23))
if old_step[0] == new_step[0] and old_step[1]<new_step[1]:
self.name_image = self.name_image_r
if old_step[0] == new_step[0] and old_step[1]>new_step[1]:
self.name_image = self.name_image_l
if old_step[0] < new_step[0] and old_step[1]==new_step[1]:
self.name_image = self.name_image_d
if old_step[0] > new_step[0] and old_step[1]==new_step[1]:
self.name_image = self.name_image_u
self.cords['y'] = new_step[0]*23
self.cords['x'] = new_step[1]*23
def search_eat(self,pacman_cords):
closest_nodes =[]
# pacman_x = int(pacman_cords['x']/23)
# pacman_y = int(pacman_cords['y']/23)
ghost_x = int(self.cords['x']/23)
ghost_y = int(self.cords['y']/23)
vertex = (ghost_y,ghost_x)
queue = [vertex]
map_to_a_vertex = {}
visited_n = [vertex]
if vertex == pacman_cords:
return []
while queue != []:
new_v = queue.pop(0)
new_v_adj = [(new_v[0] - 1, new_v[1]),
(new_v[0] + 1, new_v[1]),
(new_v[0], new_v[1] - 1),
(new_v[0], new_v[1] + 1)]
for v_adj in new_v_adj:
if self.is_p_vertex(v_adj) and v_adj not in visited_n:
queue.append(v_adj)
visited_n.append(v_adj)
map_to_a_vertex[v_adj] = new_v
if v_adj == pacman_cords:
# map_to_a_vertex[v_adj] = new_v
# print(map_to_a_vertex)
# print("abc",v_adj,new_v)
while map_to_a_vertex[v_adj] != vertex:
# print("abc",v_adj)
v_adj = map_to_a_vertex[v_adj]
return [v_adj]
return []
def draw_ghost(self,screen):
ghost = pygame.image.load("Ghosts/Ghost_cyan_down.png")
# print(self.find_closest_vertex())
        self.ghost_move((14, 13), (16, 14))  # ghost_move() takes no screen argument
# p = self.path[-1]
# pygame.draw.rect(screen, (124, 124, 0),
# (p[1]* 23, p[0] * 23, 23, 23))
screen.blit(ghost,(self.cords['x'], self.cords['y']))
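# Editor's standalone sketch of the path-reconstruction idea used by
# find_closest_vertex() and search_eat() above: BFS records each cell's
# parent, then walks parents back from the goal to rebuild the path.
# 'passable' stands in for a predicate like MakeGraph.is_p_vertex.
def _bfs_path(start, goal, passable):
    queue, parent, visited = [start], {}, {start}
    while queue:
        cell = queue.pop(0)
        for nxt in [(cell[0] - 1, cell[1]), (cell[0] + 1, cell[1]),
                    (cell[0], cell[1] - 1), (cell[0], cell[1] + 1)]:
            if passable(nxt) and nxt not in visited:
                parent[nxt] = cell
                if nxt == goal:
                    path = [nxt]  # path excludes start, ends at goal
                    while parent[path[0]] != start:
                        path.insert(0, parent[path[0]])
                    return path
                queue.append(nxt)
                visited.add(nxt)
    return []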
|
ProfessionalIT/professionalit-webiste | sdk/google_appengine/google/appengine/ext/mapreduce/model.py | Python | lgpl-3.0 | 38,467 | 0.006603 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Model classes which are used to communicate between parts of implementation.
These model classes are describing mapreduce, its current state and
communication messages. They are either stored in the datastore or
serialized to/from json and passed around with other means.
"""
__all__ = ["MapreduceState",
"MapperSpec",
"MapreduceControl",
"MapreduceSpec",
"ShardState",
"CountersMap",
"TransientShardState",
"QuerySpec",
"HugeTask"]
import cgi
import datetime
import urllib
import zlib
import google
from google.appengine._internal.graphy.backends import google_chart_api
import simplejson
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import hooks
from google.appengine.ext.mapreduce import json_util
from google.appengine.ext.mapreduce import util
_MAP_REDUCE_KINDS = ("_GAE_MR_MapreduceControl",
"_GAE_MR_MapreduceState",
"_GAE_MR_ShardState",
"_GAE_MR_TaskPayload")
class _HugeTaskPayload(db.Model):
"""Model object to store task payload."""
payload = db.BlobProperty()
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_GAE_MR_TaskPayload"
class HugeTask(object):
"""HugeTask is a taskqueue.Task-like class that can store big payloads.
Payloads are stored either in the task payload itself or in the datastore.
Task handlers should inherit from base_handler.HugeTaskHandler class.
"""
PAYLOAD_PARAM = "__payload"
PAYLOAD_KEY_PARAM = "__payload_key"
MAX_TASK_PAYLOAD = taskqueue.MAX_PUSH_TASK_SIZE_BYTES - 1024
MAX_DB_PAYLOAD = datastore_rpc.BaseConnection.MAX_RPC_BYTES
PAYLOAD_VERSION_HEADER = "AE-MR-Payload-Version"
PAYLOAD_VERSION = "1"
def __init__(self,
url,
params,
name=None,
eta=None,
countdown=None,
parent=None,
headers=None):
"""Init.
Args:
url: task url in str.
params: a dict from str to str.
name: task name.
eta: task eta.
countdown: task countdown.
parent: parent entity of huge task's payload.
headers: a dict of headers for the task.
Raises:
ValueError: when payload is too big even for datastore, or parent is
not specified when payload is stored in datastore.
"""
self.url = url
self.name = name
self.eta = eta
self.countdown = countdown
self._headers = {
"Content-Type": "application/octet-stream",
self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION
}
if headers:
self._headers.update(headers)
payload_str = urllib.urlencode(params)
compressed_payload = ""
if len(payload_str) > self.MAX_TASK_PAYLOAD:
compressed_payload = zlib.compress(payload_str)
if not compressed_payload:
self._payload = payload_str
elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:
self._payload = self.PAYLOAD_PARAM + compressed_payload
elif len(compressed_payload) > self.MAX_DB_PAYLOAD:
raise ValueError(
"Payload from %s to big to be stored in database: %s" %
(self.name, len(compressed_payload)))
else:
if not parent:
raise ValueError("Huge tasks should specify parent entity.")
payload_entity = _HugeTaskPayload(payload=compressed_payload,
parent=parent)
payload_key = payload_entity.put()
self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)
def add(self, queue_name, transactional=False):
"""Add task to the queue."""
task = self.to_task()
task.add(queue_name, transactional)
def to_task(self):
"""Convert to a taskqueue task."""
return taskqueue.Task(
url=self.url,
payload=self._payload,
name=self.name,
eta=self.eta,
countdown=self.countdown,
headers=self._headers)
@classmethod
def decode_payload(cls, request):
"""Decode task payload.
HugeTask controls its own payload entirely including urlencoding.
It doesn't depend on any particular web framework.
Args:
request: a webapp Request instance.
Returns:
A dict of str to str. The same as the params argument to __init__.
Raises:
DeprecationWarning: When task payload constructed from an older
incompatible version of mapreduce.
"""
if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
raise DeprecationWarning(
"Task is generated by an older incompatible version of mapreduce. "
"Please kill this job manually")
return cls._decode_payload(request.body)
@classmethod
def _decode_payload(cls, body):
compressed_payload_str = None
if body.startswith(cls.PAYLOAD_KEY_PARAM):
payload_key = body[len(cls.PAYLOAD_KEY_PARAM):]
payload_entity = _HugeTaskPayload.get(payload_key)
compressed_payload_str = payload_entity.payload
elif body.startswith(cls.PAYLOAD_PARAM):
compressed_payload_str = body[len(cls.PAYLOAD_PARAM):]
if compressed_payload_str:
payload_str = zlib.decompress(compressed_payload_str)
else:
payload_str = body
result = {}
for (name, value) in cgi.parse_qs(payload_str).items():
if len(value) == 1:
result[name] = value[0]
else:
result[name] = value
return result
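# Editor's sketch (not original code) of the payload-size tiering implemented
# in HugeTask.__init__ above: small payloads ride in the task body, mid-size
# ones are zlib-compressed inline, and anything larger is parked in a
# _HugeTaskPayload datastore entity referenced by key.
def _choose_payload_strategy(payload_str):
  if len(payload_str) <= HugeTask.MAX_TASK_PAYLOAD:
    return "inline"
  compressed = zlib.compress(payload_str)
  if len(compressed) < HugeTask.MAX_TASK_PAYLOAD:
    return "inline-compressed"
  if len(compressed) <= HugeTask.MAX_DB_PAYLOAD:
    return "datastore"
  raise ValueError("payload too big even for the datastore")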
class CountersMap(json_util.JsonMixin):
"""Maintains map from counter name to counter value.
  The class is used to provide basic arithmetic on counter values (bulk
  add/remove), to increment individual values, and to store/load data from json.
"""
def __init__(self, initial_map=None):
"""Constructor.
Args:
initial_map: initial counter values map from counter name (string) to
counter value (int).
"""
if initial_map:
self.counters = initial_map
else:
self.counters = {}
def __repr__(self):
"""Compute string representation."""
return "mapreduce.model.CountersMap(%r)" % self.counters
def get(self, counter_name, default=0):
"""Get current counter value.
Args:
counter_name: counter name as string.
default: default value if one doesn't exist.
Returns:
current counter value as int. 0 if counter was not set.
"""
return self.counters.get(counter_name, default)
d | ef increment(self, counter_name, delta):
"""Increment counter valu | e.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
new_value = current_value + delta
self.counters[counter_name] = new_value
return new_value
def add_map(self, counters_map):
"""Add all counters from the map.
For each counter in the passed map, adds its value to the counter in this
map.
Args:
counters_map: CounterMap instance to add.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, counters_map.counters[counter_name])
def sub_map(self, counters_map):
"""Subtracts all counters from the map.
For each counter in the passed map, subtracts its value to the counter in
this map.
Args:
counters |
fahri314/image-processing | 13.py | Python | gpl-3.0 | 649 | 0.032407 | # 12 nin düzenli hali
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imsave
def convert(image_1): # rgb to gray level
img_1 = plt.imread(image_1)
img_2 = np.zeros((img_1.shape[0],i | mg_1.shape[1]))
for i in range (img_1.shape[0]):
for j in range (img_2.shape[1]):
img_2[i,j] = img_1[i,j,0]/3 + img_1[i,j,1]/3 + im | g_1[i,j,2]/3
imsave(r'C:\Users\user\Desktop\gray.png', img_2)
plt.subplot(1,2,1)
plt.imshow(img_1)
plt.subplot(1,2,2)
plt.imshow(img_2,cmap = 'gray')
plt.show()
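# Editor's note: the same conversion can be written without explicit loops
# using numpy; a minimal equivalent sketch (same equal-weight average), with
# hypothetical input/output paths:
def convert_vectorized(image_path, out_path):
    img = plt.imread(image_path)
    gray = img[:, :, :3].mean(axis=2)  # average R, G, B per pixel
    imsave(out_path, gray)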
convert(r'C:\Users\user\Desktop\eleven.png') |
bgossele/geminicassandra | geminicassandra/vep.py | Python | mit | 11,612 | 0.004995 | #!/usr/bin/env python
#############
# CSQ: Consequence|Codons|Amino_acids|Gene|hgnc|Feature|EXON|polyphen|sift|Protein_position|BIOTYPE
# missense_variant|gAg/gTg|E/V|ENSG00000188157||ENST00000379370|12/36|probably_damaging(0.932)|deleterious(0.02)|728/2045|protein_coding
# nc_transcript_variant|||ENSG00000116254|CHD5|ENST00000491020|5/6|||||
#############
from collections import defaultdict, namedtuple
import itertools
class EffectDetails(object):
def __init__(self, impact_string, severity, detail_string, counter, labels):
fields = self._prep_fields(detail_string, labels)
self.effect_severity = severity
self.effect_name = impact_string
self.anno_id = counter
fields.pop("consequence", None)
self.codon_change = fields.pop("codons", None)
self.aa_change = fields.pop("amino_acids", None)
self.ensembl_gene = fields.pop("gene", None)
self.hgnc = fields.pop("symbol", None)
self.gene = self.hgnc or self.ensembl_gene
self.transcript = fields.pop("feature", None)
self.exon = fields.pop("exon", None)
self.polyphen = fields.pop("polyphen", None)
self.sift = fields.pop("sift", None)
self.aa_length = fields.pop("protein_position", None)
self.biotype = fields.pop("biotype", None)
self.warnings = fields.pop("warning", None)
self.extra_fields = {"vep_%s" % k: v for k, v in fields.items()}
self.consequence = effect_dict[self.effect_name] if self.effect_severity is not None else self.effect_name
self.so = self.effect_name # VEP impacts are SO by default
# rules for being exonic.
# 1. the impact must be in the list of exonic impacts
# 3. must be protein_coding
self.is_exonic = 0
if self.effect_name in exonic_impacts and \
self.biotype == "protein_coding":
self.is_exonic = 1
# rules for being loss-of-function (lof).
# must be protein_coding
# must be a coding variant with HIGH impact
if self.effect_severity == "HIGH" and self.biotype == "protein_coding":
self.is_lof = 1
else:
self.is_lof = 0
# Rules for being coding
# must be protein_coding
# Exonic but not UTR's
if self.is_exonic and not (self.effect_name == "5_prime_UTR_variant" or
self.effect_name == "3_prime_UTR_variant"):
self.is_coding = 1
else:
self.is_coding = 0
# parse Polyphen predictions
if self.polyphen is not None:
self.polyphen_b = self.polyphen.split("(")
self.polyphen_pred = self.polyphen_b[0]
self.polyphen2 = self.polyphen_b[1].split(")")
self.polyphen_score = self.polyphen2[0]
else:
self.polyphen_pred = None
self.polyphen_score = None
# parse SIFT predictions
if self.sift is not None:
self.sift_b = self.sift.split("(")
self.sift_pred = self.sift_b[0]
self.sift2 = self.sift_b[1].split(")")
self.sift_score = self.sift2[0]
else:
self.sift_pred = None
self.sift_score = None
def _prep_fields(self, detail_string, labels):
"""Prepare a dictionary mapping labels to provided fields in the consequence.
"""
out = {}
for key, val in itertools.izip_longest(labels, detail_string.split("|")):
if val and val.strip():
if key is None:
out["warnings"] = val.strip()
else:
out[key.strip().lower()] = val.strip()
return out
def __str__(self):
return "\t".join([self.consequence, self.effect_severity, str(self.codon_change),
str(self.aa_change), str(self.aa_length), str(self.biotype),
str(self.ensembl_gene), str(self.gene), str(self.transcript),
str(self.exon), str(self.is_exonic), str(self.anno_id), str(self.polyphen_pred),
str(self.polyphen_score), str(self.sift_pred), str(self.sift_score),
str(self.is_coding), str(self.is_lof), str(self.so)])
def __repr__(self):
return self.__str__()
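# Editor's illustration (hypothetical label set and severity value, mirroring
# the CSQ layout in the header comment): EffectDetails splits one
# pipe-delimited consequence block against the labels VEP reports, via
# _prep_fields().
def _example_effect():
    labels = ["Consequence", "Codons", "Amino_acids", "Gene", "SYMBOL",
              "Feature", "EXON", "PolyPhen", "SIFT", "Protein_position",
              "BIOTYPE"]
    detail = ("missense_variant|gAg/gTg|E/V|ENSG00000188157||ENST00000379370"
              "|12/36|probably_damaging(0.932)|deleterious(0.02)|728/2045"
              "|protein_coding")
    return EffectDetails("missense_variant", "MED", detail, 1, labels)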
exonic_impacts = ["stop_gained",
"stop_lost",
"frameshift_variant",
"initiator_codon_variant",
"inframe_deletion",
"inframe_insertion",
"missense_variant",
"incomplete_terminal_codon_variant",
"stop_retained_variant",
"synonymous_variant",
"coding_sequence_variant",
"5_prime_UTR_variant",
"3_prime_UTR_variant",
"transcript_ablation",
"transcript_amplification",
"feature_elongation",
"feature_truncation"]
effect_names = ["splice_acceptor_variant", "splice_donor_variant",
"stop_gained", "stop_lost",
"non_coding_exon_variant", "frameshift_variant",
"initiator_codon_variant", "inframe_deletion",
"inframe_insertion", "missense_variant",
"splice_region_variant", "incomplete_terminal_codon_variant",
"stop_retained_variant", "synonymous_variant",
"coding_sequence_variant", "mature_miRNA_variant",
"5_prime_UTR_variant", "3_prime_UTR_variant",
"intron_variant", "NMD_transcript_variant",
"nc_transcript_variant", "upstream_gene_variant",
"downstream_gene_variant", "regulatory_region_variant",
"TF_binding_site_variant", "intergenic_variant",
"regulatory_region_ablation", "regulatory_region_amplification",
"transcript_ablation", "transcript_amplification",
"TFBS_ablation", "TFBS_amplification",
"feature_elongation", "feature_truncation"]
effect_dict = defaultdict()
effect_dict = {
'splice_acceptor_variant': 'splice_acceptor', 'splice_donor_variant': 's | plice_donor',
'stop_gained': 'stop_gain', 'stop_lost': 'stop_loss',
'non_coding_exon_variant': 'nc_exon', 'frameshift_variant': 'frame_shift',
'initiator_codon_variant': 'transcript_codon_change', 'inframe_deletion': ' | inframe_codon_loss',
'inframe_insertion': 'inframe_codon_gain', 'missense_variant': 'non_syn_coding',
'splice_region_variant': 'splice_region', 'incomplete_terminal_codon_variant': 'incomplete_terminal_codon',
'stop_retained_variant': 'synonymous_stop', 'synonymous_variant': 'synonymous_coding',
'coding_sequence_variant': 'CDS', 'mature_miRNA_variant': 'mature_miRNA',
'5_prime_UTR_variant': 'UTR_5_prime', '3_prime_UTR_variant': 'UTR_3_prime',
'intron_variant': 'intron', 'NMD_transcript_variant': 'NMD_transcript',
'nc_transcript_variant': 'nc_transcript', 'upstream_gene_variant': 'upstream',
'downstream_gene_variant': 'downstream', 'regulatory_region_variant': 'regulatory_region',
'TF_binding_site_variant': 'TF_binding_site', 'intergenic_variant': 'intergenic',
'regulatory_region_ablation': 'regulatory_region_ablation', 'regulatory_region_amplification': 'regulatory_region_amplification',
'transcript_ablation': 'transcript_ablation', 'transcript_amplification': 'transcript_amplification',
'TFBS_ablation': 'TFBS_ablation', 'TFBS_amplification': 'TFBS_amplification',
'feature_elongation': 'feature_elongation', 'feature_truncation': 'feature_truncation'}
effect_desc = ["The variant hits the splice acceptor site (2 basepair region at 3' end of an intron)", "The variant hits the splice donor site (2 basepair region at 5'end of an intron)",
"Variant causes a STOP codon", "Variant causes stop codon to be mutated into a non-stop codon",
"Variant causes a change in the non coding exon sequence", "Insertion or deletion causes a frame shift in coding sequence",
"Vari |
hanset/kanban | board/models.py | Python | gpl-3.0 | 1,141 | 0.004382 | import datetime
from django.db import models
from django.contrib.auth.models import User |
from django.utils import timezone
from django.forms import ModelForm
class Task(models.Model):
creator = models.ForeignKey(User, related_name='Creator')
owner = models.ForeignKey(User, related_name='Owner')
    creation_date = models.DateTimeField('Date created', auto_now_add=True)  # set once at creation, not on every save
work_date = models.DateTimeField('Worked started', blank=True, null=True)
finished_date = models.DateTimeField('Date finished', blank=True, null=Tr | ue)
description = models.TextField(max_length=500)
name = models.CharField(max_length=40)
todo = 'To do'
inprogress = 'In Progress'
finished = 'Finished'
state_choices = (
(todo, 'To do'),
(inprogress, 'In progress'),
(finished, 'Finished'),
)
state = models.CharField(max_length=12, choices=state_choices, default=todo)
def __unicode__(self):
return self.description
class AddTaskForm(ModelForm):
class Meta:
model = Task
fields = ('name', 'description', 'state', 'creator', 'owner', 'work_date', 'finished_date')
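# Editor's usage sketch (not part of the app): moving a task across the board
# while stamping the matching timestamps. Assumes a saved Task instance.
def start_work(task):
    task.state = Task.inprogress
    task.work_date = timezone.now()
    task.save()

def finish_work(task):
    task.state = Task.finished
    task.finished_date = timezone.now()
    task.save()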
|
coffeestats/coffeestats-django | coffeestats/caffeine_oauth2/urls.py | Python | mit | 1,723 | 0 | from __future__ import absolute_import
from django.conf.urls import url
from oauth2_provider import views
from .views import CoffeestatsApplicationRegistration, \
CoffeestatsApplicationDetail, \
CoffeestatsApplicationApproval, \
CoffeestatsApplicationRejection, \
CoffeestatsApplicationFullList
urlpatterns = (
url(r'^authorize/$', views.AuthorizationView.as_view(), name="authorize"),
url(r'^token/$', views.TokenView.as_view(), name="token"),
url(r'^revoke_token/$', views.RevokeTokenView.as_view(),
name="revoke-token"),
)
# Application management views
urlpatterns += (
url(r'^applications/$', views.ApplicationList.as_view(), name="list"),
url(r'^applications/register/$',
CoffeestatsApplicationRegistration.as_view(), name="register"),
url(r'^applications/(?P<pk>\d+)/$', CoffeestatsApplicationDetail.as_view(),
name="detail"),
url(r'^applications/(?P<pk>\d+)/delete/$',
views.ApplicationDelete.as_view(), name="delete"),
url(r'^applications/(?P<pk>\d+)/update/$',
views.ApplicationUpdate.as_view(), name="upd | ate"),
url(r'^applications/(?P<pk>\d+)/approve/$',
CoffeestatsApplicationApproval.as_view(), name="approve"),
url(r'^applications/(?P<pk>\d+)/reject/$',
CoffeestatsApplicationRejection.as_view(), name="reject"),
url(r'^all-applications/$',
CoffeestatsApplicationFullList.as_view() | , name="list_all"),
)
urlpatterns += (
url(r'^authorized_tokens/$', views.AuthorizedTokensListView.as_view(),
name="authorized-token-list"),
url(r'^authorized_tokens/(?P<pk>\d+)/delete/$',
views.AuthorizedTokenDeleteView.as_view(),
name="authorized-token-delete"),
)
|
i32algae/pruebais | usuario/views.py | Python | gpl-3.0 | 710 | 0.023944 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from forms import SignUpForm
# Create your views here.
def signup(request):
if request.method == 'POST':
fo | rm = SignUpForm(request.POST)
if form.is_valid():
            user = form.save(commit=False)  # don't persist the raw password first
user.set_password(u | ser.password)
user.save()
return HttpResponseRedirect('/')
else:
form = SignUpForm()
context={'form':form}
return render(request,'signup.html',context)
|
grnet/ganetimgr | apply/__init__.py | Python | gpl-3.0 | 48 | 0 | # default_app | _config = 'apply.apps.ApplyConfig' | |
cronuspaas/cronusagent | agent/agent/tests/unit/test_cleanup.py | Python | apache-2.0 | 7,566 | 0.006609 | from agent.tests import *
import pylons
import os
import logging
import time
import json
from pylons import config
from subprocess import Popen
from agent.controllers.service import ServiceController
from agent.lib.utils import rchown
from agent.lib.packagemgr import PackageMgr
from agent.tests.unit.test_util import commonSetup, checkStatus, mockDownloadPkg
from agent.tests.unit.test_util import commonTearDown
from agent.tests.unit.test_threadmgr import WaitThread
from agent.lib import utils, manifestutil
LOG = logging.getLogger(__name__)
class TestCleanupController(TestController):
def setUp(self):
commonSetup()
def tearDown(self):
commonTearDown()
@staticmethod
def rchown(path, uname):
uid, gid = utils.getUidGid(uname)
rchown(path, uid, gid)
def testDelete(self):
def deleteTestDir(service):
LOG.debug('************ service = %s' % service)
response = self.app.delete(url(controller='service', service=service, action='delete'), expect_errors = True)
# make sure the responses are correct
LOG.debug('status = ' + str(response.status_int))
# assert response.status_int == 500, "HTTP response != 500"
def makeTestDir(path):
os.makedirs(path)
os.makedirs(os.path.join(path, 'manifests'))
os.makedirs(os.path.join(path, 'installed-packages'))
def createManifests(mf_path):
os.makedirs(os.path.join(mf_path, 'm1.0', 'dummy_dir1.0'))
os.makedirs(os.path.join(mf_path, 'm2.0', 'dummy_dir2.0'))
latest = os.path.join(mf_path, 'm3.0')
os.makedirs(os.path.join(latest, 'dummy_dir3.0'))
utils.symlink(latest, os.path.join(mf_path, 'active'))
return (['m1.0', 'm2.0', 'm3.0'], 'm3.0')
def makePackageContent(path, pkgPath, pkgPropPath):
pkgFile = file(pkgPath, 'w')
for index in range(10):
pkgFile.write(('%s%s') % (index, index))
pkgFile.close()
pkgFile = file(pkgPropPath, 'w')
for index in range(10):
pkgFile.write(('%s%s') % (index, index))
pkgFile.close()
uname = pylons.config['agent_user_account']
TestCleanupController.rchown(path, uname)
def createTestThread(serviceName):
appGlobal = config['pylons.app_globals']
testTh = WaitThread(appGlobal.threadMgr, ServiceController.serviceCat(serviceName))
testTh.start()
return testTh
def startTestProcess():
cmd = utils.sudoCmd(["sleep", "5"], pylons.config['app_user_account'])
return Popen(cmd)
path1 = os.path.join(pylons.config['agent_root'], 'service_nodes', 'foo')
path2 = os.path.join(pylons.config['agent_root'], 'service_nodes', 'bar')
path3 = os.path.join(pylons.config['agent_root'], 'service_nodes', 'agent')
deleteTestDir('foo')
deleteTestDir('bar')
deleteTestDir('agent')
# make dirs
makeTestDir(path1)
makeTestDir(path2)
makeTestDir(path3)
all_mf, active_mf = createManifests(ServiceController.manifestPath('agent'))
uname = pylons.config['agent_user_account']
TestCleanupController.rchown(ServiceController.serviceRootPath(), uname)
pkgDir = PackageMgr.packagePath()
pkgPath = os.path.join(pkgDir, "foo.cronus")
pkgPropPath = os.path.join(pkgDir, "foo.cronus.prop")
makePackageContent(pkgDir, pkgPath, pkgPropPath)
# create threads
testThFoo = createTestThread('foo')
testThBar = createTestThread('bar')
testThAgent = createTestThread('agent')
# start process
process = startTestProcess()
# start testing
LOG.debug('************ start cleanup')
response = self.app.post(url(controller='cleanup', action='post'))
LOG.debug ('Delete response body = ' + response.body)
body = json.loads(response.body)
tm = time.time()
while (tm + 10 > time.time()):
response = self.app.get(body['status'], expect_errors = True)
LOG.debug ('Status response body = ' + response.body)
body = json.loads(response.body)
print body
if (body['progress'] == 100):
break
time.sleep(0.1)
# make sure the responses are correct
LOG.debug('status = ' + str(response.status_int))
assert response.status_int == 200, "HTTP response != 200"
time.sleep(0.1)
assert not os.path.exists(path1), 'service foo does exist or is not a directory'
assert not os.path.exists(path2), 'service bar does exist or is not a directory'
assert os.path.exists(path3), 'service agent does NOT exist or is not a directory'
assert not testThFoo.isAlive(), 'thread Foo is still alive'
assert not testThBar.isAlive(), 'thread Bar is still alive'
assert not testThAgent.isAlive(), 'thread Agent is still alive'
assert not os.path.exists(pkgPath), 'package foo exists'
assert not os.path.exists(pkgPropPath), 'package prop foo exists'
assert os.path.exists(pkgDir), 'package directory does not exist'
# ensure agent cleanup is proper
active_mf_path = manifestutil.manifestPath('agent', active_mf)
active_link = os.path.join(ServiceController.manifestPath('agent'), 'active')
all_mf.remove(active_mf)
actual_active_mf_path = utils.readlink(active_link)
self.assertTrue(os.path.exists(active_mf_path), 'active agent manifest got deleted but shouldn\t have')
self.assertTrue(os.path.exists(active_link), 'agent active link missing')
self.assertEqual(active_mf_path, actual_active_mf_path, 'agent active link pointing to some wrong manifest; link broken?')
for mf in all_mf:
agnt_mf_path = manifestutil.manifestPath('agent', mf)
self.assertFalse(os.path.exists(agnt_mf_path), 'non active agent mf %s should have been deleted' % mf)
# self.assert | NotEquals(process.poll(), None)
def test_delete_cronus_package(self):
package = "pkgA-1.2.0.unix"
package_url = "http://www.stackscaling.com/agentrepo/pkgA-1.2.0.unix.cronus"
package_path = os.path.join(PackageMgr.packagePath(), package + '.cronus')
# case 1 - package is not present
self.assertFalse(os.path.exists(package_path))
response = self.app.delete(url(controller='cleanup', package=package, action='deleteCronusPackage'), expect_errors | = True)
self.assertEqual(response.status_int, 200)
# case 2 - package is present
mockDownloadPkg(package_url)
body = json.dumps({'package': package_url, 'packageloc' : package + '.cronus'})
response = self.app.post(url(controller = 'distribution', action = 'startdownload', service = "dist"),
headers = {'Content-Type' : 'application/json'},
params = body, expect_errors = True)
self.assertEqual(response.status_int, 200)
checkStatus(self, "http_download", response, 100)
self.assertTrue(os.path.exists(package_path))
response = self.app.delete(url(controller='cleanup', package=package, action='deleteCronusPackage'), expect_errors = True)
self.assertEqual(response.status_int, 200)
self.assertFalse(os.path.exists(package_path))
|
maggotgdv/fofix | src/midi/constants.py | Python | gpl-2.0 | 6,364 | 0.008642 | # -*- coding: ISO-8859-1 -*-
###################################################
## Definitions of the different midi events
###################################################
## Midi channel events (The most usual events)
## also called "Channel Voice Messages"
NOTE_OFF = 0x80
# 1000cccc 0nnnnnnn 0vvvvvvv (channel, note, velocity)
NOTE_ON = 0x90
# 1001cccc 0nnnnnnn 0vvvvvvv (channel, note, velocity)
AFTERTOUCH = 0xA0
# 1010cccc 0nnnnnnn 0vvvvvvv (channel, note, velocity)
CONTINUOUS_CONTROLLER = 0xB0 # see Channel Mode Messages!!!
# 1011cccc 0ccccccc 0vvvvvvv (channel, controller, value)
PATCH_CHANGE = 0xC0
# 1100cccc 0ppppppp (channel, program)
CHANNEL_PRESSURE = 0xD0
# 1101cccc 0ppppppp (channel, pressure)
PITCH_BEND = 0xE0
# 1110cccc 0vvvvvvv 0wwwwwww (channel, value-lo, value-hi)
###################################################
## Channel Mode Messages (Continuous Controller)
## They share a status byte.
## The controller makes the difference here
# High resolution continuous controllers (MSB)
BANK_SELECT = 0x00
MODULATION_WHEEL = 0x01
BREATH_CONTROLLER = 0x02
FOOT_CONTROLLER = 0x04
PORTAMENTO_TIME = 0x05
DATA_ENTRY = 0x06
CHANNEL_VOLUME = 0x07
BALANCE = 0x08
PAN = 0x0A
EXPRESSION_CONTROLLER = 0x0B
EFFECT_CONTROL_1 = 0x0C
EFFECT_CONTROL_2 = 0x0D
GEN_PURPOSE_CONTROLLER_1 = 0x10
GEN_PURPOSE_CONTROLLER_2 = 0x11
GEN_PURPOSE_CONTROLLER_3 = 0x12
GEN_PURPOSE_CONTROLLER_4 = 0x13
# High resolution continuous controllers (LSB); suffixed _LSB so they don't
# silently clobber the MSB constants of the same name defined above
BANK_SELECT_LSB = 0x20
MODULATION_WHEEL_LSB = 0x21
BREATH_CONTROLLER_LSB = 0x22
FOOT_CONTROLLER_LSB = 0x24
PORTAMENTO_TIME_LSB = 0x25
DATA_ENTRY_LSB = 0x26
CHANNEL_VOLUME_LSB = 0x27
BALANCE_LSB = 0x28
PAN_LSB = 0x2A
EXPRESSION_CONTROLLER_LSB = 0x2B
EFFECT_CONTROL_1_LSB = 0x2C
EFFECT_CONTROL_2_LSB = 0x2D
GENERAL_PURPOSE_CONTROLLER_1 = 0x30
GENERAL_PURPOSE_CONTROLLER_2 = 0x31
GENERAL_PURPOSE_CONTROLLER_3 = 0x32
GENERAL_PURPOSE_CONTROLLER_4 = 0x33
# Switches
SUSTAIN_ONOFF = 0x40
PORTAMENTO_ONOFF = 0x41
SOSTENUTO_ONOFF = 0x42
SOFT_PEDAL_ONOFF = 0x43
LEGATO_ONOFF = 0x44
HOLD_2_ONOFF = 0x45
# Low resolution continuous controllers
SOUND_CONTROLLER_1 = 0x46 # (TG: Sound Variation; FX: Exciter On/Off)
SOUND_CONTROLLER_2 = 0x47 # (TG: Harmonic Content; FX: Compressor On/Off)
SOUND_CONTROLLER_3 = 0x48 # (TG: Release Time; FX: Distortion On/Off)
SOUND_CONTROLLER_4 = 0x49 # (TG: Attack Time; FX: EQ On/Off)
SOUND_CONTROLLER_5 = 0x4A # (TG: Brightness; FX: Expander On/Off)
SOUND_CONTROLLER_6 = 0x4B # (TG: Undefined; FX: Reverb On/Off)
SOUND_CONTROLLER_7 = 0x4C # (TG: Undefined; FX: Delay OnOff)
SOUND_CONTROLLER_8 = 0x4D # (TG: Undefined; FX: Pitch Transpose OnOff)
SOUND_CONTROLLER_9 = 0x4E # (TG: Undefined; FX: Flange/Chorus OnOff)
SOUND_CONTROLLER_10 = 0x4F # (TG: Undefined; FX: Special Effects OnOff)
GENERAL_PURPOSE_CONTROLLER_5 = 0x50
GENERAL_PURPOSE_CONTROLLER_6 = 0x51
GENERAL_PURPOSE_CONTROLLER_7 = 0x52
GENERAL_PURPOSE_CONTROLLER_8 = 0x53
PORTAMENTO_CONTROL = 0x54 # (PTC) (0vvvvvvv is the source Note number) (Detail)
EFFECTS_1 = 0x5B # (Ext. Effects Depth)
EFFECTS_2 = 0x5C # (Tremelo Depth)
EFFECTS_3 = 0x5D # (Chorus Depth)
EFFECTS_4 = 0x5E # (Celeste Depth)
EFFECTS_5 = 0x5F # (Phaser Depth)
DATA_INCREMENT = 0x60 # (0vvvvvvv is n/a; use 0)
DATA_DECREMENT = 0x61 # (0vvvvvvv is n/a; use 0)
NON_REGISTERED_PARAMETER_NUMBER_LSB = 0x62
NON_REGISTERED_PARAMETER_NUMBER_MSB = 0x63
REGISTERED_PARAMETER_NUMBER_LSB = 0x64
REGISTERED_PARAMETER_NUMBER_MSB = 0x65
# Channel Mode messages - (Detail)
ALL_SOUND_OFF = 0x78
RESET_ALL_CONTROLLERS = 0x79
LOCAL_CONTROL_ONOFF = 0x7A
ALL_NOTES_OFF = 0x7B
OMNI_MODE_OFF = 0x7C # (also causes ANO)
OMNI_MODE_ON = 0x7D # (also causes ANO)
MONO_MODE_ON = 0x7E # (Poly Off; also causes ANO)
POLY_MODE_ON = 0x7F # (Mono Off; also causes ANO)
###################################################
## System Common Messages, for all channels
SYSTEM_EXCLUSIVE = 0xF0
# 11110000 0iiiiiii 0ddddddd ... 11110111
MTC = 0xF1 # MIDI Time Code Quarter Frame
# 11110001
SONG_POSITION_POINTER = 0xF2
# 11110010 0vvvvvvv 0www | wwww (lo-position, hi-position)
SONG_SELECT = 0xF3
# 11110011 0sssssss | (songnumber)
#UNDEFINED = 0xF4
## 11110100
#UNDEFINED = 0xF5
## 11110101
TUNING_REQUEST = 0xF6
# 11110110
END_OFF_EXCLUSIVE = 0xF7 # terminator
# 11110111 # End of system exclusive
###################################################
## Midifile meta-events
SEQUENCE_NUMBER = 0x00 # 00 02 ss ss (seq-number)
TEXT = 0x01 # 01 len text...
COPYRIGHT = 0x02 # 02 len text...
SEQUENCE_NAME = 0x03 # 03 len text...
INSTRUMENT_NAME = 0x04 # 04 len text...
LYRIC = 0x05 # 05 len text...
MARKER = 0x06 # 06 len text...
CUEPOINT = 0x07 # 07 len text...
PROGRAM_NAME = 0x08 # 08 len text...
DEVICE_NAME = 0x09 # 09 len text...
MIDI_CH_PREFIX = 0x20 # MIDI channel prefix assignment (unofficial)
MIDI_PORT = 0x21 # 21 01 port, legacy stuff but still used
END_OF_TRACK = 0x2F # 2f 00
TEMPO = 0x51 # 51 03 tt tt tt (tempo in us/quarternote)
SMTP_OFFSET = 0x54 # 54 05 hh mm ss ff xx
TIME_SIGNATURE = 0x58 # 58 04 nn dd cc bb
KEY_SIGNATURE = 0x59 # ??? len text...
SPECIFIC = 0x7F # Sequencer specific event
FILE_HEADER = 'MThd'
TRACK_HEADER = 'MTrk'
###################################################
## System Realtime messages
## I don't suppose these are to be found in midi files?!
TIMING_CLOCK = 0xF8
# undefined = 0xF9
SONG_START = 0xFA
SONG_CONTINUE = 0xFB
SONG_STOP = 0xFC
# undefined = 0xFD
ACTIVE_SENSING = 0xFE
SYSTEM_RESET = 0xFF
###################################################
## META EVENT, it is used only in midi files.
## In transmitted data it means system reset!!!
META_EVENT = 0xFF
# 11111111
###################################################
## Helper functions
def is_status(byte):
return (byte & 0x80) == 0x80 # 1000 0000
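# Editor's sketch: how the channel-voice constants above are typically used.
# A status byte carries the event type in its top nibble and the channel in
# its bottom nibble, so e.g. _decode_status(0x93) -> (NOTE_ON, 3).
def _decode_status(byte):
    if not is_status(byte):
        return None
    event = byte & 0xF0    # e.g. NOTE_ON (0x90)
    channel = byte & 0x0F  # 0-15
    return event, channel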
|
djoproject/pyshell | pyshell/utils/key.py | Python | gpl-3.0 | 5,081 | 0 | #!/usr/bin/env python -t
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Jonathan Delvaux <pyshell@djoproject.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyshell.utils.exception import KeyStoreException
from pyshell.utils.string65 import isString
class CryptographicKey(object):
KEYTYPE_HEXA = 0
KEYTYPE_BIT = 1
def __init__(self, key_string):
# is it a string ?
if not isString(key_string):
raise KeyStoreException("("+self.__class__.__name__+") __init__, "
"invalid key string, expected a string, "
"got '"+str(type(key_string))+"'")
key_string = key_string.lower()
# find base
if key_string.startswith("0x"):
try:
int(key_string, 16)
except ValueError as ve:
raise KeyStoreException("("+self.__class__ | .__name__ | +") "
"__init__, invalid hexa string, start"
" with 0x but is not valid: "+str(ve))
self.keyType = CryptographicKey.KEYTYPE_HEXA
self.key = key_string[2:]
temp_key_size = float(len(key_string) - 2)
temp_key_size /= 2
self.keySize = int(temp_key_size)
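            # an odd number of hex digits leaves half a byte: round the byte
            # count up and left-pad the key with '0' to align it on bytes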
if temp_key_size > int(temp_key_size):
self.keySize += 1
self.key = "0"+self.key
elif key_string.startswith("0b"):
try:
int(key_string, 2)
except ValueError as ve:
raise KeyStoreException("("+self.__class__.__name__+") "
"__init__, invalid binary string, "
"start with 0b but is not valid: " +
str(ve))
self.keyType = CryptographicKey.KEYTYPE_BIT
self.key = key_string[2:]
self.keySize = len(self.key)
else:
raise KeyStoreException("("+self.__class__.__name__+") __init__, "
"invalid key string, must start with 0x or"
" 0b, got '"+key_string+"'")
def __str__(self):
if self.keyType == CryptographicKey.KEYTYPE_HEXA:
return "0x"+self.key
else:
return "0b"+self.key
def __repr__(self):
if self.keyType == CryptographicKey.KEYTYPE_HEXA:
return ("0x"+self.key+" ( HexaKey, size="+str(self.keySize) +
" byte(s))")
else:
return ("0b"+self.key+" ( BinaryKey, size="+str(self.keySize) +
" bit(s))")
def getKey(self, start, end=None, padding_enable=True):
if end is not None and end < start:
return ()
# part to extract from key
if start >= self.keySize:
if end is None or not padding_enable:
return ()
key_part = []
else:
limit = self.keySize
if end is not None and end <= self.keySize:
limit = end
key_part = []
if self.keyType == CryptographicKey.KEYTYPE_HEXA:
# for b in self.key[start*2:limit*2]:
for index in range(start*2, limit*2, 2):
key_part.append(int(self.key[index:index+2], 16))
else:
for b in self.key[start:limit]:
key_part.append(int(b, 2))
# padding part
if padding_enable and end is not None and end > self.keySize:
padding_length = end - self.keySize
if padding_length > 0:
key_part.extend([0] * padding_length)
return key_part
def getKeyType(self):
return self.keyType
def getKeySize(self):
return self.keySize
def getTypeString(self):
if self.keyType == CryptographicKey.KEYTYPE_HEXA:
return "byte"
else:
return "bit"
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.keyType == other.keyType and
self.key == other.key)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self.keyType)+self.key)
def __int__(self):
if self.keyType == CryptographicKey.KEYTYPE_HEXA:
return int(self.key, 16)
else:
return int(self.key, 2)
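# Illustrative behaviour (editor's sketch, not part of the original module):
#
#   k = CryptographicKey("0xdeadbeef")
#   k.getKeySize()  # -> 4 (bytes)
#   k.getKey(0, 6)  # -> [0xde, 0xad, 0xbe, 0xef, 0, 0], zero-padded up to 'end'
#   int(k)          # -> 3735928559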
|
yuxans/badgirl | src/coin.py | Python | gpl-2.0 | 1,861 | 0.020956 | #!/usr/bin/env python
# Copyright (c) 2003 Phil Gregory
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This p | rogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have rece | ived a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""coin.py - flips a coin on demand"""
handler_list=["coin"]
from moobot_module import MooBotModule
class coin(MooBotModule):
def __init__(self):
self.regex="^coin"
def handler(self, **args):
"""If given a list of alternatives separated by 'or', picks
from among them. Otherwise picks either heads or tails from a
virtual coin."""
import random, re
from irclib import Event
# Strip "botname: coin" off the front.
str = " ".join(args["text"].split()[2:])
# Attempt some rudimentary first-to-second person changes.
str = re.sub('\b([Ii]|[Mm][Ee])\b', 'you', str)
str = re.sub('\b[Mm][Yy]\b', 'your', str)
# Prepare the options for decision.
str = re.sub('\?', '', str)
options = re.split(',?\s+or\s+', str)
if len(options) <= 1 or random.randint(1, 10) == 1:
options = ["Heads!", "Tails!"]
third = "Edge!?"
elif len(options) == 2:
third = "Both!"
else:
third = "All of them!"
choice = random.choice(options)
if (random.randint(1, 100) == 1):
choice = third
return Event("privmsg", "", self.return_to_sender(args),
[choice])
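# Example behaviour (editor's sketch): "bot: coin tea or coffee" usually
# answers with "tea" or "coffee" and occasionally "Both!"; a bare
# "bot: coin" flips "Heads!"/"Tails!" with a rare "Edge!?".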
|
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/input/func_i0012.py | Python | mit | 78 | 0 | # pylint:enable=W0404
"""check | warning on local enabling
"""
__revision__ = 1 | |
hgmeyer/BIVAC | sw/image_processing/remapping/Unwarper.py | Python | gpl-3.0 | 3,590 | 0.002507 | """
Implements a task for unwarping the camera images
"""
import multiprocessing
import cv2
from image_processing.remapping.OCameraModel import OCameraModel
class Unwarper(multiprocessing.Process):
"""
This class implements a task for unwarping the images obtained from a camera with a fisheye lens
"""
def __init__(self, input_queue, output_queue,
width_rescaled, height_rescaled,
scaling_factor,
calibration_file,
flip_image=False):
"""
Constructor of the Unwarping class
:param input_queue: multiprocessing.JoinableQueue containing input data
:param output_queue: multiprocessing.Queue containing output data
:param width_rescaled: width of the remapped image
:param height_rescaled: height of the remapped image
        :param scaling_factor: scaling factor for the perspective undistortion LUT (controls the zoom of the remapped image)
        :param flip_image: flips the image upside down if set to true
:param calibration_file: calibration file (from OCamModel toolbox) containing camera calibration data
"""
# Initialize multiprocessing.Process parent
multiprocessing.Process.__init__(self)
# Establish queues
self._input_queue = input_queue
self._output_queue = output_queue
# Exit event for stopping process
self._exit = multiprocessing.Event()
# Initialize variables
self._img = None
self._width_rescaled = width_rescaled
self._height_rescaled = height_rescaled
self._flip_image = flip_image
self._scaling_factor = scaling_factor
self._calibration_file = calibration_file
# Initialize omidirectional camera model
self._ocammodel = OCameraModel()
# Parse calibration file
self._ocammodel.get_ocam_model(self._calibration_file)
# Get undistortion LUTs
self._mapx, self._mapy = self._ocammodel.create_perspective_undistortion_lut(self._height_rescaled,
self._width_rescaled,
self._scaling_factor)
def run(self):
"""
        Function called when the task is started (e.g. task.start()). Overrides the run function of the
        multiprocessing.Process parent.
"""
# Clear exit event just to be sure
self._exit.clear()
# While exit event is not set...
while not se | lf._exit.is_set():
# ...get data from input queue
self._parse_input()
            # ...remap image (INTER_AREA is not supported by cv2.remap, so use
            # bilinear interpolation instead)
            img_remapped = cv2.remap(self._img, self._mapx, self._mapy, cv2.INTER_LINEAR)
# ...flip image
if self._flip_image:
img_remapped = cv2.flip(img_remapped, 0 | )
img_remapped = cv2.flip(img_remapped, 1)
# ...put image in output queue
self._output_queue.put(({'remapped_image': img_remapped}))
def terminate(self):
"""
Called when task is terminated. Overwrites multiprocessing.Process.terminate() function
"""
# Set exit event
self._exit.set()
def _parse_input(self):
"""
Parse data from input queue
"""
# Get data from queue
data = self._input_queue.get()
if data is not None:
# Parse data from input queue
try:
self._img = data['camera_image']
except KeyError as e:
raise KeyError('Key ' + e.args[0] + ' not found in input data!')
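# Example wiring (editor's sketch; queue names, image size, scaling factor and
# the calibration file path are hypothetical):
#
#   in_q, out_q = multiprocessing.JoinableQueue(), multiprocessing.Queue()
#   unwarper = Unwarper(in_q, out_q, 640, 480, 4.0, 'calib_results.txt')
#   unwarper.start()
#   in_q.put({'camera_image': frame})
#   remapped = out_q.get()['remapped_image']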
|
zachjanicki/osf.io | website/addons/dataverse/tests/test_model.py | Python | apache-2.0 | 3,530 | 0.0017 | from nose.tools import * # noqa
import mock
from tests.base import get_default_metaschema
from framework.auth.decorators import Auth
from website.addons.base.testing import models
from website.addons.dataverse.model import AddonDataverseNodeSettings
from website.addons.dataverse.tests.factories import (
DataverseAccountFactory, DataverseNodeSettingsFactory,
DataverseUserSettingsFactory
)
from website.addons.dataverse.tests import utils
class TestNodeSettings(models.OAuthAddonNodeSettingsTestSuiteMixin, utils.DataverseAddonTestCase):
short_name = 'dataverse'
full_name = 'Dataverse'
ExternalAccountFactory = DataverseAccountFactory
NodeSettingsFactory = DataverseNodeSettingsFactory
NodeSettingsClass = AddonDataverseNodeSettings
UserSettingsFactory = DataverseUserSettingsFactory
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'_dataset_id': '1234567890',
'owner': self.node
}
def setUp(self): |
su | per(TestNodeSettings, self).setUp()
self.set_node_settings(self.node_settings)
@mock.patch('website.archiver.tasks.archive')
def test_does_not_get_copied_to_registrations(self, mock_archive):
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(user=self.project.creator),
data='hodor',
)
assert_false(registration.has_addon('dataverse'))
## Overrides ##
def test_create_log(self):
action = 'file_added'
filename = 'pizza.nii'
nlog = len(self.node.logs)
self.node_settings.create_waterbutler_log(
auth=Auth(user=self.user),
action=action,
metadata={'path': filename, 'materialized': filename},
)
self.node.reload()
assert_equal(len(self.node.logs), nlog + 1)
assert_equal(
self.node.logs[-1].action,
'{0}_{1}'.format(self.short_name, action),
)
assert_equal(
self.node.logs[-1].params['filename'],
filename
)
def test_set_folder(self):
dataverse = utils.create_mock_dataverse()
dataset = utils.create_mock_dataset()
self.node_settings.set_folder(dataverse, dataset, auth=Auth(self.user))
# Folder was set
assert_equal(self.node_settings.folder_id, dataset.id)
# Log was saved
last_log = self.node.logs[-1]
assert_equal(last_log.action, '{0}_dataset_linked'.format(self.short_name))
def test_serialize_credentials(self):
credentials = self.node_settings.serialize_waterbutler_credentials()
assert_is_not_none(self.node_settings.external_account.oauth_secret)
expected = {'token': self.node_settings.external_account.oauth_secret}
assert_equal(credentials, expected)
def test_serialize_settings(self):
settings = self.node_settings.serialize_waterbutler_settings()
expected = {
'host': self.external_account.oauth_key,
'doi': self.node_settings.dataset_doi,
'id': self.node_settings.dataset_id,
'name': self.node_settings.dataset,
}
assert_equal(settings, expected)
class TestUserSettings(models.OAuthAddonUserSettingTestSuiteMixin, utils.DataverseAddonTestCase):
short_name = 'dataverse'
full_name = 'Dataverse'
ExternalAccountFactory = DataverseAccountFactory
|
klim-iv/phantomjs-qt5 | src/webkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py | Python | bsd-3-clause | 44,901 | 0.00029 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Standalone WebSocket server.
Use this file to launch pywebsocket without Apache HTTP Server.
BASIC USAGE
Go to the src directory and run
$ python mod_pywebsocket/standalone.py [-p <ws_port>]
[-w <websock_handlers>]
[-d <document_root>]
<ws_port> is the port number to use for ws:// connection.
<document_root> is the path to the root directory of HTML files.
<websock_handlers> is the path to the root directory of WebSocket handlers.
If not specified, <document_root> will be used. See __init__.py (or
run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
For more detail and other options, run
$ python mod_pywebsocket/standalone.py --help
or see _build_option_parser method below.
For troubleshooting, adding "--log_level debug" might help you.
TRY DEMO
Go to the src directory a | nd run
$ python standalone.py -d example
to launch pywebsocket with the sample handler and html on port 80. Open
http://localhost/console.html, click the connect button, type something into
the text box next to the send button and click the send button. If everything
is working, you'll see the message you typed echoed by the server.
SUPPORTING TLS
To support TLS, run standalone | .py with -t, -k, and -c options.
Note that when ssl module is used and the key/cert location is incorrect,
TLS connection silently fails while pyOpenSSL fails on startup.
SUPPORTING CLIENT AUTHENTICATION
To support client authentication with TLS, run standalone.py with -t, -k, -c,
and --tls-client-auth, and --tls-client-ca options.
E.g., $./standalone.py -d ../example -p 10443 -t -c ../test/cert/cert.pem -k
../test/cert/key.pem --tls-client-auth --tls-client-ca=../test/cert/cacert.pem
CONFIGURATION FILE
You can also write a configuration file and use it by specifying the path to
the configuration file by --config option. Please write a configuration file
following the documentation of the Python ConfigParser library. Name of each
entry must be the long version argument name. E.g. to set log level to debug,
add the following line:
log_level=debug
For options which don't take a value, please add some fake value. E.g. for
--tls option, add the following line:
tls=True
Note that tls will be enabled even if you write tls=False as the value part is
fake.
When both a command line argument and a configuration file entry are set for
the same configuration item, the command line value will override one in the
configuration file.
THREADING
This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
used for each request.
SECURITY WARNING
This uses CGIHTTPServer and CGIHTTPServer is not secure.
It may execute arbitrary Python code or external programs. It should not be
used outside a firewall.
"""
import BaseHTTPServer
import CGIHTTPServer
import SimpleHTTPServer
import SocketServer
import ConfigParser
import base64
import httplib
import logging
import logging.handlers
import optparse
import os
import re
import select
import socket
import sys
import threading
import time
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import http_header_util
from mod_pywebsocket import memorizingfile
from mod_pywebsocket import util
_DEFAULT_LOG_MAX_BYTES = 1024 * 256
_DEFAULT_LOG_BACKUP_COUNT = 5
_DEFAULT_REQUEST_QUEUE_SIZE = 128
# 1024 is practically large enough to contain WebSocket handshake lines.
_MAX_MEMORIZED_LINES = 1024
# Constants for the --tls_module flag.
_TLS_BY_STANDARD_MODULE = 'ssl'
_TLS_BY_PYOPENSSL = 'pyopenssl'
class _StandaloneConnection(object):
"""Mimic mod_python mp_conn."""
def __init__(self, request_handler):
"""Construct an instance.
Args:
request_handler: A WebSocketRequestHandler instance.
"""
self._request_handler = request_handler
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return (self._request_handler.server.server_name,
self._request_handler.server.server_port)
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr.
Setting the property in __init__ won't work because the request
handler is not initialized yet there."""
return self._request_handler.client_address
remote_addr = property(get_remote_addr)
def write(self, data):
"""Mimic mp_conn.write()."""
return self._request_handler.wfile.write(data)
def read(self, length):
"""Mimic mp_conn.read()."""
return self._request_handler.rfile.read(length)
def get_memorized_lines(self):
"""Get memorized lines."""
return self._request_handler.rfile.get_memorized_lines()
class _StandaloneRequest(object):
"""Mimic mod_python request."""
def __init__(self, request_handler, use_tls):
"""Construct an instance.
Args:
request_handler: A WebSocketRequestHandler instance.
"""
self._logger = util.get_class_logger(self)
self._request_handler = request_handler
self.connection = _StandaloneConnection(request_handler)
self._use_tls = use_tls
self.headers_in = request_handler.headers
def get_uri(self):
"""Getter to mimic request.uri.
This method returns the raw data at the Request-URI part of the
Request-Line, while the uri method on the request object of mod_python
returns the path portion after parsing the raw data. This behavior is
kept for compatibility.
"""
return self._request_handler.path
uri = property(get_uri)
def get_unparsed_uri(self):
"""Getter to mimic request.unparsed_uri."""
return self._request_handler.path
unparsed_uri = property(get_unparsed_uri)
def get_method(self):
"""Getter to mimic request.method."""
return self._request_handler.command
method = property(get_method)
def get_protocol(self):
"""Getter to mimic request.protocol."""
return self._request_handler.request_version
protocol = property(get_protocol)
def is_https(self):
"""Mimic request.is_https()."""
return self._use_tls
def _import_ssl():
global ssl
try:
import ssl
return True
except ImportError:
return False
def _import_pyopenssl():
global OpenSSL
try:
import OpenSSL.SSL
return True
except ImportError:
ret |
sch3m4/SerialCrypt | apps/locate.py | Python | bsd-3-clause | 724 | 0.03453 | #!/usr/bin/env python
#
# Written by Chema Garcia (aka sch3m4)
# Contact: chema@safetybits.net || http://safetybits.net || @sch3m4
#
import | serial.tools.list_ports
from SerialCrypt import Devices
def locateDevice(devid):
'''
Returns the serial port path of the arduino if found, or None if it isn't connected
'''
retval = None
for port in serial.tools.list_ports.comports():
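        # each entry from comports() is a (device, description, hwid) tuple in
        # this pyserial API; match the start of the hwid against devid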
if port[2][:len(devid)] == devid:
retval = port[0]
break
return retval
def main():
print "HSM Device: %s" | % locateDevice ( Devices.DEVICE_CRYPT_ID )
print "uToken Device: %s" % locateDevice ( Devices.DEVICE_UTOKEN_ID )
print "Debug Device: %s" % locateDevice ( Devices.DEVICE_DEBUG_ID )
if __name__ == "__main__":
main()
|
garmann/playground | python/python_mysql_queue/app.py | Python | mit | 2,098 | 0.035272 | #!/usr/bin/env python3
from classes.myqueue import MyQueue
import time # time.sleep(0.02)
import random # random.randint(1, 100 | )
im | port socket # socket.gethostname()
import sys
import argparse
CONF_DB = {
'server': 'localhost',
'user': 'root',
'pass': 'x',
'db': 'myqueue'
}
def worker_create(q, amount):
# makes objects in state new
hostname = socket.gethostname()
while amount > 0:
amount -= 1
objectname = "{}_{}_{}".format(hostname, int(time.time()), random.randint(1,10000000))
q.object_add(objectname)
def worker_update(q, amount):
# changes objects into status running
while amount > 0:
amount -= 1
try:
objectid = q.object_get_object_bystate('new')[0]['object']
q.object_update_status(name=objectid, status='running')
except IndexError: # happens when there are no new objects
pass
def worker_finish(q, amount):
# changes objects into status done
while amount > 0:
amount -= 1
try:
objectid = q.object_get_object_bystate('running')[0]['object']
q.object_update_status(name=objectid, status='done')
except IndexError: # happens when there are no running objects
pass
def main(args):
q = MyQueue(CONF_DB)
with q:
# using "with" ensures db exit, not worked on my testing with the db library
# see __enter__ & __exit__ in MyQueue Class
if args.type == 'create':
worker_create(q, args.amount)
elif args.type == 'update':
worker_update(q, args.amount)
elif args.type == 'finish':
worker_finish(q, args.amount)
else:
print('shit happens')
sys.exit(1)
# mysql> select status, count(object) as count from queue group by status order by count DESC
# set global general_log = 'ON';
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='its me, the python queue...')
parser.add_argument('type',
default='create',
help='for type: choose between create, update and finish',
choices=['create', 'update', 'finish'],
type=str)
parser.add_argument('--amount',
type=int,
default=1000,
help='amount to create/modify/finish')
args = parser.parse_args()
main(args)
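# Usage examples (sketch):
#   python3 app.py create --amount 500
#   python3 app.py finish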
|
opethe1st/CompetitiveProgramming | CodeChef/2017/LongChallenge/September/ChefSum.py | Python | gpl-3.0 | 284 | 0.007042 | def solution(A):
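    # returns the 1-based index of the first occurrence of the minimum of A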
mini = A[0]
| minindex = 0
for i in xrange(len(A)):
if mini > A[i]:
mini = A[i]
minindex = i
return minindex+1
T = input()
for _ in xrange(T):
N = input()
A = m | ap(int, raw_input().split())
print(solution(A)) |
Zolertia/openthread | tests/scripts/thread-cert/Cert_5_5_01_LeaderReset.py | Python | bsd-3-clause | 3,068 | 0.001304 | #!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote prod | ucts
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER = 1
ROUTER = 2
class Cert_5_5_1_LeaderReset(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,3):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
rloc16 = self.nodes[LEADER].get_addr16()
        self.nodes[LEADER].stop()
time.sleep(5)
self.nodes[LEADER].start()
time.sleep(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.assertEqual(self.nodes[LEADER].get_addr16(), rloc16)
if __name__ == '__main__':
unittest.main()
|
tmetsch/graph_stitcher | tests/stitcher_iterative_repair_test.py | Python | mit | 7,668 | 0 | """
Unittests for iterative_repair module.
"""
import json
import unittest
import networkx as nx
from networkx.readwrite import json_graph
from stitcher import iterative_repair
def _sample_data():
cont = nx.DiGraph()
cont.add_node('1', **{'type': 'a', 'group': 'foo', 'rank': 1.0})
cont.add_node('2', **{'type': 'b', 'group': 'foo', 'rank': 1.0})
cont.add_node('3', **{'type': 'b', 'group': 'bar', 'rank': 2.0})
cont.add_node('4', **{'type': 'a', 'group': 'bar', 'rank': 2.0})
cont.add_edge('1', '2')
cont.add_edge('2', '3')
cont.add_edge('4', '3')
req = nx.DiGraph()
req.add_node('a', **{'type': 'x'})
req.add_node('b', **{'type': 'y'})
req.add_edge('a', 'b')
return cont, req
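# The sample container above is a four-node directed graph split into groups
# 'foo' and 'bar'; the request asks for two nodes 'a' (type x) and 'b'
# (type y) joined by a single edge.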
class IterativeRepairStitcherTest(unittest.TestCase):
"""
Test for class IterativeRepairStitcher.
"""
def setUp(self) -> None:
container_tmp = json.load(open('data/container.json'))
self.container = json_graph.node_link_graph(container_tmp,
directed=True)
request_tmp = json.load(open('data/request.json'))
self.request = json_graph.node_link_graph(request_tmp,
directed=True)
rels = json.load(open('data/stitch.json'))
self.cut = iterative_repair.IterativeRepairStitcher(rels)
# Test for success.
def test_stitch_for_success(self):
"""
        Test for success.
"""
self.cut.stitch(self.container, self.request)
def test_find_conflicts_for_success(self):
"""
Test for success.
"""
cont, req = _sample_data()
condy = {'attributes': [('eq', ('a', ('foo', 'bar')))]}
self.cut.find_conflicts(cont, req, condy, {'a': '1'})
def test_next_conflict_for_success(self):
"""
Test for success.
"""
self.cut.next_conflict([('foo', 'bar'), ('bar', 'foo')])
def test_fix_for_success(self):
"""
Test for success.
"""
self.cut.fix_conflict(('k', ('eq', ('rank', 5))),
self.container,
self.request,
{'k': 'A'})
# Test for failure.
def test_stitch_for_failure(self):
"""
Test for failure.
"""
cont = nx.DiGraph()
cont.add_node('foo', **{'type': 'a'})
req = nx.DiGraph()
req.add_node('bar', **{'type': 'y'}) # no matching type in container.
self.assertRaises(Exception, self.cut.stitch, cont, req)
# test with unsolvable case.
cont, req = _sample_data()
res = self.cut.stitch(cont, req, {
'attributes':
[('eq', ('a', ('buuha', 'asdf')))]
})
self.assertTrue(len(res) == 0)
# Test for sanity.
def test_stitch_for_sanity(self):
"""
Test for sanity.
"""
condy = {
'attributes': [('eq', ('k', ('rank', 5)))]
}
res = self.cut.stitch(self.container, self.request, conditions=condy)
# TODO: test with multigraph request!
self.assertIsInstance(res, list)
self.assertIsInstance(res[0], nx.DiGraph)
def test_find_conflicts_for_sanity(self):
"""
Test for sanity.
"""
cont, req = _sample_data()
# a doesn't have foo attr.
condy = {'a': [('eq', ('foo', 'bar'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a is in group foo
condy = {'a': [('neq', ('group', 'foo'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's rank is 1.0
condy = {'a': [('lt', ('rank', 0.5))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's rank is 1.0
condy = {'a': [('gt', ('rank', 2.0))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a's group name is a word
condy = {'a': [('regex', ('group', '\\d'))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not on same node...
condy = {'a': [('same', 'b')], 'b': [('same', 'a')]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '2'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not on same node...
condy = {'a': [('diff', 'b')], 'b': [('diff', 'a')]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '1'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b not in same group
condy = {'a': [('share', ('group', ['b']))],
'b': [('share', ('group', ['a']))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '3'})
self.assertEqual(condy['a'][0], res[0][1])
# a & b in same group
condy = {'a': [('nshare', ('group', ['b']))],
'b': [('nshare', ('group', ['a']))]}
res = self.cut.find_conflicts(cont, req, condy, {'a': '1', 'b': '2'})
self.assertEqual(condy['a'][0], res[0][1])
def test_next_conflict_for_sanity(self):
"""
Test for sanity.
"""
res = self.cut.next_conflict(['foo', 'bar'])
self.assertIsNotNone(res)
def test_fix_for_sanity(self):
"""
Test for sanity.
"""
cont, req = _sample_data()
mapping = {'a': '1'}
self.cut.fix_conflict(('a', ('eq', ('foo', 'bar'))), cont, req,
mapping)
self.assertIn('a', mapping)
class TestConvertConditions(unittest.TestCase):
"""
Test the condition converter.
"""
def setUp(self) -> None:
self.cond = {
'attributes': [('eq', ('a', ('foo', 'y'))),
('neq', ('a', ('foo', 5))),
('lt', ('a', ('foo', 4))),
('lg', ('a', ('foo', 7))),
('regex', ('a', ('foo', '^a')))],
'compositions': [('same', ('1', '2')),
('diff', ('3', '4')),
('diff', ('3', '1')),
('share', ('group', ['x', 'y'])),
('nshare', ('group', ['a', 'b']))]
}
# Test for success.
def test_convert_for_success(self):
"""
Test for success.
"""
iterative_repair.convert_conditions(self.cond)
# Test for failure
# N/A
# Test for sanity.
def test_conve | rt_for_sanity(self):
"""
Test for sanity.
"""
res = iterative_repair.convert_conditions(self.cond)
self.assertIn('a', res)
self.assertIn('b', res)
self.assertIn('x', res)
self.assertIn('y', res)
self.assertIn('1', res)
self.assertIn('2', res)
self.assertIn('3', res)
self.assertIn('4', res)
self.assertTrue(len(res['a']) == 6) # eq, neq, | lt, lg, regex, nshare
self.assertTrue(len(res['b']) == 1) # nshare
self.assertTrue(len(res['x']) == 1) # share
self.assertTrue(len(res['y']) == 1) # share
self.assertTrue(len(res['1']) == 2) # same, diff
self.assertTrue(len(res['2']) == 1) # same
self.assertTrue(len(res['3']) == 2) # 2x diff
self.assertTrue(len(res['4']) == 1) # diff
|