repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
edx/edx-platform | openedx/core/djangoapps/api_admin/tests/test_models.py | Python | agpl-3.0 | 6,287 | 0.003022 | # pylint: disable=missing-docstring
from smtplib import SMTPException
from unittest import mock
import pytest
import ddt
from django.db import IntegrityError
from django.test import TestCase
from openedx.core.djangoapps.api_admin.models import ApiAccessConfig, ApiAccessRequest
from openedx.core.djangoapps.api_admin.models import log as model_log
from openedx.core.djangoapps.api_admin.tests.factories import ApiAccessRequestFactory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangolib.testing.utils import skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
@ddt.ddt
@skip_unless_lms
class ApiAccessRequestTests(TestCase):
def setUp(self):
super().setUp()
self.user = UserFactory()
self.request = ApiAccessRequestFactory(user=self.user)
def test_default_status(self):
assert self.request.status == ApiAccessRequest.PENDING
assert not ApiAccessRequest.has_api_access(self.user)
def test_approve(self):
self.request.approve()
assert self.request.status == ApiAccessRequest.APPROVED
def test_deny(self):
self.request.deny()
assert self.request.status == ApiAccessRequest.DENIED
def test_nonexistent_request(self):
"""Test that users who have not requested API access do not get it."""
other_user = UserFactory()
assert not ApiAccessRequest.has_api_access(other_user)
@ddt.data(
(ApiAccessRequest.PENDING, False),
(ApiAccessRequest.DENIED, False),
(ApiAccessRequest.APPROVED, True),
)
@ddt.unpack
def test_has_access(self, status, should_have_access):
self.request.status = status
self.request.save()
assert ApiAccessRequest.has_api_access(self.user) == should_have_access
def test_unique_per_user(self):
with pytest.raises(IntegrityError):
ApiAccessRequestFactory(user=self.user)
def test_no_access(self):
self.request.delete()
assert ApiAccessRequest.api_access_status(self.user) is None
def test_unicode(self):
request_unicode = str(self.request)
assert self.request.website in request_unicode
assert self.request.status in request_unicode
def test_retire_user_success(self):
retire_result = self.request.retire_user(self.user)
assert retire_result
assert self.request.company_address == ''
assert self.request.company_name == ''
assert self.request.website == ''
assert self.request.reason == ''
def test_retire_user_do_not_exist(self):
user2 = UserFactory()
retire_result = self.request.retire_user(user2)
assert not retire_result
class ApiAccessConfigTests(TestCase):
def test_unicode(self):
assert str(ApiAccessConfig(enabled=True)) == 'ApiAccessConfig [enabled=True]'
assert str(ApiAccessConfig(enabled=False)) == 'ApiAccessConfig [enabled=False]'
@skip_unless_lms
class ApiAccessRequestSignalTests(TestCase):
def setUp(self):
super().setUp()
self.user = UserFactory()
self.api_access_request = ApiAccessRequest(user=self.user, site=SiteFactory())
self.send_new_pending_email_function = 'openedx.core.djangoapps.api_admin.models._send_new_pending_email'
self.send_decision_email_function = 'openedx.core.djangoapps.api_admin.models._send_decision_email'
def test_save_signal_success_new_email(self):
""" Verify that initial save sends new email and no decision email. """
with mock.patch(self.send_new_pending_email_function) as mock_new_email:
with mock.patch(self.send_decision_email_function) as mock_decision_email:
self.api_access_request.save()
mock_new_email.assert_called_once_with(self.api_access_request)
assert not mock_decision_email.called
def test_save_signal_success_decision_email(self):
""" Verify that updating request status sends decision email and no new email. """
self.api_access_request.save()
with mock.patch(self.send_new_pending_email_function) as mock_new_email:
with mock.patch(self.send_decision_email_function) as mock_decision_email:
self.api_access_request.approve()
mock_decision_email.assert_called_once_with(self.api_access_request)
assert not mock_new_email.called
def test_save_signal_success_no_emails(self):
""" Verify that updating request status again sends no emails. """
self.api_access_request.save()
self.api_access_request.approve()
with mock.patch(self.send_new_pending_email_function) as mock_new_email:
with mock.patch(self.send_decision_email_function) as mock_decision_email:
self.api_access_request.deny()
assert not mock_decision_email.called
assert not mock_new_email.called
def test_save_signal_failure_email(self):
""" Verify that saving still functions even on email errors. """
assert self.api_access_request.id is None
mail_function = 'openedx.core.djangoapps.api_admin.models.send_mail'
with mock.patch(mail_function, side_effect=SMTPException):
with mock.patch.object(model_log, 'exception') as mock_model_log_exception:
self.api_access_request.save()
# Verify that initial save logs email errors properly
mock_model_log_exception.assert_called_once_with(
'Error sending API user notification email for request [%s].', self.api_access_request.id
)
# Verify object saved
assert self.api_access_request.id is not None
with mock.patch(mail_function, side_effect=SMTPException):
with mock.patch.object(model_log, 'exception') as mock_model_log_exception:
self.api_access_request.approve()
# Verify that updating request status logs email errors properly
mock_model_log_exception.assert_called_once_with(
'Error sending API user notification email for request [%s].', self.api_access_request.id
)
# Verify object saved
assert self.api_access_request.status == ApiAccessRequest.APPROVED
huntxu/fuel-web | nailgun/nailgun/rpc/__init__.py | Python | apache-2.0 | 2,731 | 0.000366 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import functools
import amqp.exceptions as amqp_exceptions
from kombu import Connection
from kombu import Exchange
from oslo_serialization import jsonutils
from kombu import Queue
from nailgun.logger import logger
from nailgun.settings import settings
from nailgun.rpc import utils
creds = (
("userid", "guest"),
("password", "guest"),
("hostname", "localhost"),
("port", "5672"),
)
conn_str = 'amqp://{0}:{1}@{2}:{3}//'.format(
*[settings.RABBITMQ.get(*cred) for cred in creds]
)
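# With the defaults above this resolves to 'amqp://guest:guest@localhost:5672//'
# unless settings.RABBITMQ overrides the credentials.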
naily_exchange = Exchange(
'naily',
'topic',
durable=True
)
naily_queue = Queue(
'naily',
exchange=naily_exchange,
routing_key='naily'
)
naily_service_exchange = Exchange(
'naily_service',
'fanout',
durable=False,
auto_delete=True
)
naily_service_queue = Queue(
'naily_service',
exchange=naily_service_exchange
)
nailgun_exchange = Exchange(
'nailgun',
'topic',
durable=True
)
nailgun_queue = Queue(
'nailgun',
exchange=nailgun_exchange,
routing_key='nailgun'
)
def cast(name, message, service=False):
logger.debug(
"RPC cast to orchestrator:\n{0}".format(
jsonutils.dumps(message, indent=4)
)
)
use_queue = naily_queue if not service else naily_service_queue
use_exchange = naily_exchange if not service else naily_service_exchange
with Connection(conn_str) as conn:
with conn.Producer(serializer='json') as producer:
publish = functools.partial(producer.publish, message,
exchange=use_exchange, routing_key=name, declare=[use_queue])
try:
publish()
except amqp_exceptions.PreconditionFailed as e:
logger.warning(six.text_type(e))
# (dshulyak) we should drop both exchanges/queues in order
# for astute to be able to recover temporary queues
utils.delete_entities(
conn, naily_service_exchange, naily_service_queue,
naily_exchange, naily_queue)
publish()
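# A minimal usage sketch (hypothetical payload; 'naily' matches the routing
# key of the queue declared above):
#
#     cast('naily', {'method': 'deploy', 'args': {'task_uuid': '42'}})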
ralhei/PyHDB | tests/test_cursor.py | Python | apache-2.0 | 9,174 | 0.000545 | # Copyright 2014, 2015 SAP SE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import pytest
from pyhdb.cursor import format_operation
from pyhdb.exceptions import ProgrammingError
import tests.helper
TABLE = 'PYHDB_TEST_1'
TABLE_FIELDS = 'TEST VARCHAR(255)'
@pytest.fixture
def test_table_1(request, connection):
"""Fixture to create table for testing, and dropping it after test run"""
tests.helper.create_table_fixture(request, connection, TABLE, TABLE_FIELDS)
@pytest.fixture
def content_table_1(request, connection):
"""Additional fixture to test_table_1, inserts some rows for testing"""
cursor = connection.cursor()
cursor.execute("insert into PYHDB_TEST_1 values('row1')")
cursor.execute("insert into PYHDB_TEST_1 values('row2')")
cursor.execute("insert into PYHDB_TEST_1 values('row3')")
@pytest.mark.parametrize("parameters", [
None,
(),
[]
])
def test_format_operation_without_parameters(parameters):
"""Test that providing no parameter produces correct result."""
operation = "SELECT * FROM TEST WHERE fuu = 'bar'"
assert format_operation(operation, parameters) == operation
def test_format_operation_with_positional_parameters():
"""Test that correct number of parameters produces correct result."""
assert format_operation(
"INSERT INTO TEST VALUES(%s, %s)", ('Hello World', 2)
) == "INSERT INTO TEST VALUES('Hello World', 2)"
def test_format_operation_with_too_few_positional_parameters_raises():
"""Test that providing too few parameters raises exception"""
with pytest.raises(ProgrammingError):
format_operation("INSERT INTO TEST VALUES(%s, %s)", ('Hello World',))
def test_format_operation_with_too_many_positional_parameters_raises():
"""Test that providing too many parameters raises exception"""
with pytest.raises(ProgrammingError):
format_operation("INSERT INTO TEST VALUES(%s)", ('Hello World', 2))
def test_format_operation_with_named_parameters():
"""format_operation() is used for Python style parameter expansion"""
assert format_operation(
"INSERT INTO TEST VALUES(%(name)s, %(val)s)",
{'name': 'Hello World', 'val': 2}
) == "INSERT INTO TEST VALUES('Hello World', 2)"
@pytest.mark.hanatest
def test_cursor_fetch_without_execution(connection):
cursor = connection.cursor()
with pytest.raises(ProgrammingError):
cursor.fetchone()
@pytest.mark.hanatest
def test_cursor_fetchall_single_row(connection):
cursor = connection.cursor()
cursor.execute("SELECT 1 FROM DUMMY")
result = cursor.fetchall()
assert result == [(1,)]
@pytest.mark.hanatest
def test_cursor_fetchall_multiple_rows(connection):
cursor = connection.cursor()
cursor.execute('SELECT "VIEW_NAME" FROM "PUBLIC"."VIEWS" LIMIT 10')
result = cursor.fetchall()
assert len(result) == 10
# Test cases for different parameter style expansion
#
# paramstyle Meaning
# ---------------------------------------------------------
# 1) qmark Question mark style, e.g. ...WHERE name=?
# 2) numeric Numeric, positional style, e.g. ...WHERE name=:1
# 3) named Named style, e.g. ...WHERE name=:name -> NOT IMPLEMENTED !!
# 4) format ANSI C printf format codes, e.g. ...WHERE name=%s
# 5) pyformat Python extended format codes, e.g. ...WHERE name=%(name)s
@pytest.mark.hanatest
def test_cursor_execute_with_params1(connection, test_table_1, content_table_1):
"""Test qmark parameter expansion style - uses cursor.prepare*() methods"""
# Note: use fetchall() to check that only one row gets returned
cursor = connection.cursor()
sql = 'select test from PYHDB_TEST_1 where test=?'
# correct way:
assert cursor.execute(sql, ['row2']).fetchall() == [('row2',)]
# invalid - extra unexpected parameter
with pytest.raises(ProgrammingError):
cursor.execute(sql, ['row2', 'extra']).fetchall()
@pytest.mark.hanatest
def test_cursor_execute_with_params2(connection, test_table_1, content_table_1):
"""Test numeric parameter expansion style - uses cursor.prepare() methods"""
# Note: use fetchall() to check that only one row gets returned
cursor = connection.cursor()
sql = 'select test from PYHDB_TEST_1 where test=?'
# correct way:
assert cursor.execute(sql, ['row2']).fetchall() == [('row2',)]
# invalid - extra unexpected parameter
with pytest.raises(ProgrammingError):
cursor.execute(sql, ['row2', 'extra']).fetchall()
@pytest.mark.hanatest
def test_cursor_execute_with_params4(connection, test_table_1, content_table_1):
"""Test format (positional) parameter expansion style"""
# Uses prepare_operation method
cursor = connection.cursor()
sql = 'select test from PYHDB_TEST_1 where test=%s'
# correct way:
assert cursor.execute(sql, ['row2']).fetchall() == [('row2',)]
# invalid - extra unexpected parameter
with pytest.raises(ProgrammingError):
cursor.execute(sql, ['row2', 'extra']).fetchall()
@pytest.mark.hanatest
def test_cursor_execute_with_params5(connection, test_table_1, content_table_1):
"""Test pyformat (named) parameter expansion style"""
# Note: use fetchall() to check that only one row gets returned
cursor = connection.cursor()
sql = 'select test from {} where test=%(test)s'.format(TABLE)
# correct way:
assert cursor.execute(sql, {'test': 'row2'}).fetchall() == [('row2',)]
# also correct way, additional dict value should just be ignored
assert cursor.execute(sql, {'test': 'row2', 'd': 2}).fetchall() == \
[('row2',)]
@pytest.mark.hanatest
def test_cursor_insert_commit(connection, test_table_1):
cursor = connection.cursor()
cursor.execute("SELECT COUNT(*) FROM %s" % TABLE)
assert cursor.fetchone() == (0,)
cursor.execute("INSERT INTO %s VALUES('Hello World')" % TABLE)
assert cursor.rowcount == 1
cursor.execute("SELECT COUNT(*) FROM %s" % TABLE)
assert cursor.fetchone() == (1,)
connection.commit()
@pytest.mark.hanatest
def test_cursor_create_and_drop_table(connection):
cursor = connection.cursor()
if tests.helper.exists_table(connection, TABLE):
cursor.execute('DROP TABLE "%s"' % TABLE)
assert not tests.helper.exists_table(connection, TABLE)
cursor.execute('CREATE TABLE "%s" ("TEST" VARCHAR(255))' % TABLE)
assert tests.helper.exists_table(connection, TABLE)
cursor.execute('DROP TABLE "%s"' % TABLE)
@pytest.mark.hanatest
def test_received_last_resultset_part_resets_after_execute(connection):
# The private attribute was not reset to False after
# executing another statement
cursor = connection.cursor()
cursor.execute("SELECT 1 FROM DUMMY")
# Result is very small we got everything direct into buffer
assert cursor._received_last_resultset_part
cursor.execute("SELECT VIEW_NAME FROM PUBLIC.VIE | WS")
# Result is not small enough for a single resultset part
assert not cursor._received_last_resultset_part
@pytest.mark.hanatest
@pytest.mark.parametrize("method", [
'fetchone',
'fetchall',
'fetchmany',
])
def test_fetch_raises_error_after_close(connection, method):
cursor = connection.cursor()
cursor.close()
with pytest.raises(ProgrammingError):
getattr(cursor, method)()
@pytest.mark.hanatest
def test_execute_raises_error_after_close(connection):
cursor = connection.cursor()
cursor.close()
with pytest.raises(ProgrammingError):
cursor.execute("SELECT TEST FROM DUMMY")
@pytest.mark.hanatest
def test_cursor_description_after_execution(connection):
cursor = connection.cursor()
assert cursor.description
smartkiwi/interval_calculator | interval_calculator/bx/quicksect.py | Python | mit | 6,110 | 0.016858 | """
Intersects ... faster. Supports GenomicInterval datatype and multiple
chromosomes.
Copyright: James Taylor james@jamestaylor.org
This code is part of the bx-python package
license: bsd licensed
https://bitbucket.org/james_taylor/bx-python/src/ebf9a4b352d3267303657afd57bd184f379eaf28/lib/bx/intervals/operations/quicksect.py?at=default
"""
import math
import time
import sys
import random
class IntervalTree( object ):
def __init__( self ):
self.chroms = {}
def insert( self, interval, linenum=0, other=None ):
chrom = interval.chrom
start = interval.start
end = interval.end
if interval.chrom in self.chroms:
self.chroms[chrom] = self.chroms[chrom].insert( start, end, linenum, other )
else:
self.chroms[chrom] = IntervalNode( start, end, linenum, other )
def intersect( self, interval, report_func ):
chrom = interval.chrom
start = interval.start
end = interval.end
if chrom in self.chroms:
self.chroms[chrom].intersect( start, end, report_func )
def traverse( self, func ):
for item in self.chroms.itervalues():
item.traverse( func )
class IntervalNode( object ):
def __init__( self, start, end, linenum=0, other=None ):
# Python lacks the binomial distribution, so we convert a
# uniform into a binomial because it naturally scales with
# tree size. Also, python's uniform is perfect since the
# upper limit is not inclusive, which gives us undefined here.
self.priority = math.ceil( (-1.0 / math.log(.5)) * math.log( -1.0 / (random.uniform(0,1) - 1)))
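# (Worked out: with u ~ U(0,1), -1.0/(u - 1) = 1/(1 - u), so the expression
# above is ceil(log2(1/(1 - u))) -- a Geometric(1/2) draw with
# P(priority > k) = 0.5**k, which is what keeps the treap balanced.)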
self.start = start
self.end = end
self.maxend = self.end
self.minend = self.end
self.left = None
self.right = None
self.linenum = linenum
self.other = other
def insert( self, start, end, linenum=0, other=None ):
root = self
if start > self.start:
# insert to right tree
if self.right:
self.right = self.right.insert( start, end, linenum, other )
else:
self.right = IntervalNode(start, end, linenum, other )
# rebalance tree
if self.priority < self.right.priority:
root = self.rotateleft()
else:
# insert to left tree
if self.left:
self.left = self.left.insert( start, end, linenum, other )
else:
self.left = IntervalNode(start, end, linenum, other )
# rebalance tree
if self.priority < self.left.priority:
root = self.rotateright()
if root.right and root.left:
root.maxend = max( root.end, root.right.maxend, root.left.maxend )
root.minend = min( root.end, root.right.minend, root.left.minend )
elif root.right:
root.maxend = max( root.end, root.right.maxend )
root.minend = min( root.end, root.right.minend )
elif root.left:
root.maxend = max( root.end, root.left.maxend )
root.minend = min( root.end, root.left.minend )
return root
def rotateright( self ):
root = self.left
self.left = self.left.right
root.right = self
if self.right and self.left:
self.maxend = max(self.end, self.right.maxend, self.left.maxend)
self.minend = min(self.end, self.right.minend, self.left.minend )
elif self.right:
self.maxend = max(self.end, self.right.maxend)
self.minend = min(self.end, self.right.minend)
elif self.left:
self.maxend = max(self.end, self.left.maxend)
self.minend = min(self.end, self.left.minend )
return root
def rotateleft( self ):
root = self.right
self.right = self.right.left
root.left = self
if self.right and self.left:
self.maxend = max(self.end, self.right.maxend, self.left.maxend)
self.minend = min(self.end, self.right.minend, self.left.minend )
elif self.right:
self.maxend = max(self.end, self.right.maxend)
self.minend = min(self.end, self.right.minend)
elif self.left:
self.maxend = max(self.end, self.left.maxend)
self.minend = min(self.end, self.left.minend )
return root
def intersect( self, start, end, report_func ):
if start < self.end and end > self.start: report_func( self )
if self.left and start < self.left.maxend:
self.left.intersect( start, end, report_func )
if self.right and end > self.start:
self.right.intersect( start, end, report_func )
def traverse( self, func ):
if self.left: self.left.traverse( func )
func( self )
if self.right: self.right.traverse( func )
def main():
test = None
intlist = []
for x in range(20000):
start = random.randint(0,1000000)
end = start + random.randint(1, 1000)
if test: test = test.insert( start, end )
else: test = IntervalNode( start, end )
intlist.append( (start, end) )
starttime = time.clock()
for x in range(5000):
start = random.randint(0, 10000000)
end = start + random.randint(1, 1000)
result = []
test.intersect( start, end, lambda x: result.append(x.linenum) )
print "%f for tree method" % (time.clock() - starttime)
starttime = time.clock()
for x in range(5000):
start = random.randint(0, 10000000)
end = start + random.randint(1, 1000)
bad_sect( intlist, start, end)
print "%f for linear (bad) method" % (time.clock( | ) - starttime)
def test_func( node ):
print "[%d, %d), %d" % (node.start, node.end, node.maxend)
def bad_sect( lst, int_start, int_end ):
intersection = []
for start, end in lst:
if int_start < end and int_end > start:
intersection.append( (start, end) )
return intersection
if __name__ == "__main__":
main()
TaliesinSkye/evennia | src/players/manager.py | Python | bsd-3-clause | 6,119 | 0.001307 | """
The managers for the custom Player object and permissions.
"""
import datetime
from functools import update_wrapper
from django.contrib.auth.models import User
from src.typeclasses.managers import returns_typeclass_list, returns_typeclass, TypedObjectManager
from src.utils import logger
__all__ = ("PlayerManager",)
#
# Player Manager
#
def returns_player_list(method):
"""
decorator that makes sure that a method
returns a Player object instead of a User
one (if you really want the User object, not
the player, use the player's 'user' property)
"""
def func(self, *args, **kwargs):
"This *always* returns a list."
match = method(self, *args, **kwargs)
if not match:
return []
try:
match = list(match)
except TypeError:
match = [match]
players = []
for user in match:
try:
players.append(user.get_profile())
except Exception:
# there is something wrong with get_profile. But
# there is a 1-1 relation between Users-Players, so we
# try to go the other way instead.
from src.players.models import PlayerDB
match = PlayerDB.objects.filter(user__id=user.id)
if match:
players.append(match[0])
else:
logger.log_trace("No connection User<->Player, maybe database was partially reset?")
return players
return update_wrapper(func, method)
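# For example (illustrative): a wrapped method that returns a single User
# yields a one-element list of Players, and a falsy result yields [].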
def returns_player(method):
"""
Decorator: Always returns a single result or None.
"""
def func(self, *args, **kwargs):
"decorator"
rfunc = returns_player_list(method)
match = rfunc(self, *args, **kwargs)
if match:
return match[0]
else:
return None
return update_wrapper(func, method)
class PlayerManager(TypedObjectManager):
"""
This PlayerManager implements methods for searching
and manipulating Players directly from the database.
Evennia-specific search methods (will return Characters if
possible or a Typeclass/list of Typeclassed objects, whereas
Django-general methods will return Querysets or database objects):
dbref (converter)
dbref_search
get_dbref_range
object_totals
typeclass_search
num_total_players
get_connected_players
get_recently_created_players
get_recently_connected_players
get_player_from_email
get_player_from_uid
get_player_from_name
player_search (equivalent to ev.search_player)
swap_character
"""
def num_total_players(self):
"""
Returns the total number of registered users/players.
"""
return self.count()
@returns_typeclass_list
def get_connected_players(self):
"""
Returns a list of player objects with currently connected users/players.
"""
return self.filter(db_is_connected=True)
@returns_typeclass_list
@returns_player_list
def get_recently_created_players(self, days=7):
"""
Returns a QuerySet containing the player User accounts that have been
connected within the last <days> days.
"""
end_date = datetime.datetime.now()
tdelta = datetime.timedelta(days)
start_date = end_date - tdelta
return User.objects.filter(date_joined__range=(start_date, end_date))
@returns_typeclass_list
@returns_player_list
def get_recently_connected_players(self, days=7):
"""
Returns a QuerySet containing the player User accounts that have been
connected within the last <days> days.
days - number of days backwards to check
"""
end_date = datetime.datetime.now()
tdelta = datetime.timedelta(days)
start_date = end_date - tdelta
return User.objects.filter(last_login__range=(
start_date, end_date)).order_by('-last_login')
@returns_typeclass
@returns_player
def get_player_from_email(self, uemail):
"""
Returns a player object when given an email address.
"""
return User.objects.filter(email__iexact=uemail)
@returns_typeclass
@returns_player
def get_player_from_uid(self, uid):
"""
Returns a player object based on User id.
"""
try:
return User.objects.get(id=uid)
except User.model.DoesNotExist:
return None
@returns_typeclass
def get_player_from_name(self, uname):
"Get player object based on name"
try:
return self.get(user__username__iexact=uname)
except self.model.DoesNotExist:
return None
@returns_typeclass_list
def player_search(self, ostring):
"""
Searches for a particular player by name or
database id.
ostring = a string or database id.
"""
dbref = self.dbref(ostring)
if dbref or dbref == 0:
matches = self.filter(id=dbref)
if matches:
return matches
return self.filter(user__username__iexact=ostring)
def swap_character(self, player, new_character, delete_old_character=False):
"""
This disconnects a player from the current character (if any) and connects
to a new character object.
"""
if new_character.player:
# the new character is already linked to a player!
return False
# do the swap
old_character = player.character
if old_character:
old_character.player = None
try:
player.character = new_character
new_character.player = player
except Exception:
# recover old setup
if old_character:
old_character.player = player
player.character = old_character
return False
if old_character and delete_old_character:
old_character.delete()
return True
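# Illustrative use, assuming the usual Evennia setup where this manager is
# exposed as PlayerDB.objects (an assumption, not shown in this file):
#
#     PlayerDB.objects.player_search("Bob")
#     PlayerDB.objects.get_recently_connected_players(days=3)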
huyphan/pyyawhois | test/record/parser/test_response_whois_nic_asia_status_available.py | Python | mit | 2,079 | 0.002886 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.asia/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicAsiaStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.asia/status_available.txt"
host = "whois.nic.asia"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, [])
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, None)
def test_reserved(self):
eq_(self.record.reserved, False)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
eq_(self.record.admin_contacts.__class__.__name__, 'list')
eq_(self.record.admin_contacts, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrar(self):
eq_(self.record.registrar, None)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(self.record.registrant_contacts, [])
def test_technical_contacts(self):
eq_(self.record.technical_contacts.__class__.__name__, 'list')
eq_(self.record.technical_contacts, [])
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_domain_id(self):
eq_(self.record.domain_id, None)
def test_expires_on(self):
eq_(self.record.expires_on, None)
def test_disclaimer(self):
eq_(self.record.disclaimer, None)
jbalogh/jingo | run_tests.py | Python | bsd-3-clause | 522 | 0 | import os
import nose
import django
NAME = os.path.basename(os.path.dirname(__file__))
ROOT = os.path.abspath(os.path.dirname(__file__))
os.environ['DJANGO_SETTINGS_MODULE'] = 'fake_settings'
os.environ['PYTHONPATH'] = os.pathsep.join([ROOT,
os.path.join(ROOT, 'examples')])
if __name__ == '__main__':
if hasattr(django, 'setup'):
# Django's app registry was added in 1.7. We need to call `setup` to
# initiate it.
django.setup()
nose.main()
cgstudiomap/cgstudiomap | main/parts/product-attribute/product_customer_code/product.py | Python | agpl-3.0 | 3,314 | 0 | # -*- coding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# info@vauxoo.com
############################################################################
# Coded by: Rodo (rodo@vauxoo.com),Moy (moylop260@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class product_product(orm.Model):
_inherit = "product.product"
_columns = {
'product_customer_code_ids': fields.one2many('product.customer.code',
'product_id',
'Customer Codes'),
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default['product_customer_code_ids'] = False
res = super(product_product, self).copy(
cr, uid, id, default=default, context=context)
return res
def name_search(self, cr, user, name='', args=None, operator='ilike',
context=None, limit=80):
res = super(product_product, self).name_search(
cr, user, name, args, operator, context, limit)
if not context:
context = {}
product_customer_code_obj = self.pool.get('product.customer.code')
if not res:
ids = []
partner_id = context.get('partner_id', False)
if partner_id:
id_prod_code = \
product_customer_code_obj.search(cr, user,
[('product_code',
'=', name),
('partner_id', '=',
partner_id)],
limit=limit,
context=context)
# TODO: Search for product customer name
id_prod = id_prod_code and product_customer_code_obj.browse(
cr, user, id_prod_code, context=context) or []
for ppu in id_prod:
ids.append(ppu.product_id.id)
if ids:
res = self.name_get(cr, user, ids, context)
return res
HelloTechie/tweeting_turkey | turkey_tweet.py | Python | mit | 1,243 | 0.00885 | #!/usr/bin/python
import sys
sys.path.insert(0, '/usr/local/lib/python2.7/site-packages/')
import mraa
import time
from twython import Twython
# Authentication - Obtain Authorization URL
APP_KEY = 'lXZFwVtC8CGPKs4LOuv2m7WGS'
APP_SECRET = 'AoToSyNNyhzg2EhN38Edx6bQ59wPSLH8ztLZNZkWt1us7IzKUl'
OAUTH_TOKEN = '2892359329-fcC7F5l7Jx9CutiQACVrsTAUHN6GilG6RjmJPaH'
OAUTH_TOKEN_SECRET = '7HakyTT0SxwM8jAXjT8IPs73GW9QECRp3WnjUxXVNkmQZ'
twitter = Twython(APP_KEY, APP_SECRET,OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
print (mraa.getVersion())
tempSensor = mraa.Aio(0)
thresholds ={
60: 'Guys, it is starting to get warm in here...',
80: 'Boys, do I smell good',
100: 'Yum time! @hello_techie'
}
while 1:
temp = tempSensor.read() / 2.048
print('Current Temperature: %.3fC' % temp)
for thresholdTemp, thresholdMessage in thresholds.items():
if (temp > thresholdTemp):
print thresholdMessage
twitter.update_status(status='%.3fC: %s' % (temp, thresholdMessage))
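# Drop the threshold that just fired so the same message is not tweeted again.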
del thresholds[thresholdTemp]
time.sleep(1)
BrandonLMorris/auacm-cli | src/auacm/main.py | Python | mit | 2,810 | 0.001068 | """
main.py
The central entry point of the auacm app.
"""
import requests, sys, textwrap
import auacm
import auacm.utils as utils
from auacm.exceptions import ConnectionError, ProblemNotFoundError, UnauthorizedException, InvalidSubmission, CompetitionNotFoundError
def main(args):
"""
Entry point for the auacm-cli app
Supported commands (passed as positional arguments):
[none] print logo and help
ping attempt to connect to the server
login log in as a user to the website
logout log out of current session
whoami print basic info about the current user
problem search for a problem
submit submit a solution to a problem
problem-info get detailed information on a problem
"""
if not args or args[0] in {'-h', '--help'}:
# No subcommand, print info
print(auacm.logo)
print('Welcome to the Auburn ACM command-line interface!')
print(textwrap.dedent('''
Supported Commands:
[none], -h, --help print this lovely help
ping attempt to connect to the server
login log into the website
logout log out of current session
whoami print basic info about the current user
problem [-v/--verbose] search for a problem
submit [-i/--id][-p/--python {2, 3}] <problem> <file>
problem-info [-i/--id] <problem>
competitions [[-i/--id] <competition>]
init [-i/--id] <problem>
test <solution> [[-i/--id] <problem>] [-p/--python {2, 3}]
'''))
elif args[0] in utils.callbacks:
try:
print(utils.callbacks[args[0]](args[1:]) or '')
except (ProblemNotFoundError,
UnauthorizedException,
InvalidSubmission,
CompetitionNotFoundError) as exp:
print(exp.message)
exit(1)
except (requests.exceptions.ConnectionError, ConnectionError):
print('There was an error connecting to the server: {}'
.format(auacm.BASE_URL))
exit(1)
else:
print('Whoops, that subcommand isn\'t supported.\n'
'Run again with -h or --help to see full list of commands.')
@utils.subcommand('ping')
def test(_=None):
"""Try to connect to the server"""
test_url = auacm.BASE_URL[:-4]
response = requests.get(test_url)
if response.ok:
return 'Connection successful! ' + str(response.status_code)
else:
raise auacm.exceptions.ConnectionError('Could not connect to server')
if __name__ == '__main__':
# Run the app from the main.py script directly
main(sys.argv[1:])
hurricup/intellij-community | python/testData/codeInsight/liveTemplates/context/general.py | Python | apache-2.0 | 13 | 0.230769 | p<caret>
pass
tamasgal/controlhost | setup.py | Python | mit | 751 | 0.003995 | from setuptools import setup
from controlhost import version
setup(name='controlhost',
version=version,
url='https://github.com/tamasgal/controlhost/',
description='A set of classes and tools which use the ControlHost protocol.',
author='Tamas Gal',
author_email='himself@tamasgal.com',
packages=['controlhost'],
include_package_data=True,
platforms='any',
install_requires=[
],
entry_points={
'console_scripts': [
#'controlhost=controlhost.app:main',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
],
)
__author__ = 'Tamas Gal'
guykisel/inline-plz | tests/parsers/test_jsonlint.py | Python | isc | 753 | 0.003984 | # -*- coding: utf-8 -*-
import inlineplz.linters.jsonlint as jsonlint
def test_jsonlint():
input = [
("21.json", "21.json: line 1, col 25, found: ',' - expected: ':'. | "),
(
"25.json",
"25.json: line 1, col 1, found: 'INVALID' - expected: 'STRING', 'NUMBER', 'NULL', 'TRUE', 'FALSE', '{', '[', ']'.",
),
(
"23.json",
"23.json: line 1, col 13, found: 'INVALID' - expected: 'STRING', 'NUMBER', 'NULL', 'TRUE', 'FALSE', '{', '['.",
),
]
messages = sorted(list(jsonlint.JSONLintParser().parse(input)))
assert messages[0][2] == "21.json: line 1, col 25, found: ',' - expected: ':'."
assert messages[0][1] == 1
assert messages[0][0] == "21.json"
cloudysunny14/CloudySwitch | cloudyswitch/app/topology_util.py | Python | apache-2.0 | 2,860 | 0.003846 | #!/usr/bin/env python
#
# Copyright 2013 cloudysunny14.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.exception import RyuException
def find_all_paths(graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
if not graph.has_key(start):
return []
paths = []
for node in graph[start]:
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
paths.sort(key = len)
return paths
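# A small sketch of the expected result (example input, not from the source):
#
#     find_all_paths({1: [2, 3], 2: [3]}, 1, 3)  # -> [[1, 3], [1, 2, 3]]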
class LinkedPorts(object):
def __init__(self):
self.link = {}
def addLink(self, link):
link_roots = self.link.get(link.src.dpid, [])
link_roots.append(link)
self.link[link.src.dpid] = link_roots
def getLink(self, src_dpid, dst_dpid):
link_roots = self.link[src_dpid]
for link in link_roots:
if link.dst.dpid == dst_dpid:
return link
return None
class PathList(object):
class IllegalLink(RyuException):
message = '%(msg)s'
def __init__(self, link_list):
self.link_list = link_list
self.ports = {}
self.linked_ports = LinkedPorts()
def _createGraph(self, link_list):
graph = {}
for link in link_list:
self.linked_ports.addLink(link)
src_dpid = link.src.dpid
dst_dpid = link.dst.dpid
linked_nodes = graph.get(src_dpid, [])
linked_nodes.append(dst_dpid)
graph[src_dpid] = linked_nodes
return graph
def createWholePath(self, src_dpid, dst_dpid):
graph = self._createGraph(self.link_list)
paths = find_all_paths(graph, src_dpid, dst_dpid)
path_ports = []
for path in paths:
ports = []
for index in range(len(path)-1):
link = self.linked_ports.getLink(path[index],
path[index+1])
if link is None:
raise PathList.IllegalLink(
msg='Illegal link found. Can\'t create paths %s' % link)
else:
ports.append(link.src)
ports.append(link.dst)
path_ports.append(ports)
return path_ports
superdesk/superdesk-ntb | server/ntb/commands/update_items.py | Python | agpl-3.0 | 2,412 | 0.001244 |
import bson
import bson.errors
import superdesk
from .update_topics import UpdateTopicsScript
from .update_places import UpdatePlacesScript
RESOURCES = ('events', 'planning', 'archive', 'published', 'archived')
SCRIPTS = [
("topics", UpdateTopicsScript()),
("places", UpdatePlacesScrip | t()),
]
def get_id(id):
try:
return bson.ObjectId(id)
except bson.errors.InvalidId:
return id
class UpdateItemsCommand(superdesk.Command):
"""Update Items"""
option_list = [
superdesk.Option('--resource', '-r', dest='resources', action='append', choices=RESOURCES),
superdesk.Option('--last', '-l'),
]
def run(self, resources=None, last=None):
if not resources:
resources = RESOURCES
for resource in resources:
print("updating {resource}".format(resource=resource))
service = superdesk.get_resource_service(resource)
last_id = None
if last:
last_item = service.find_one(req=None, _id=get_id(last))
if not last_item:
print("skip resource {resource}".format(resource=resource))
continue
print("continue from {last}".format(last=last))
last_id = get_id(last_item['_id'])
query = {}
while True:
if last_id:
query["_id"] = {"$gt": last_id}
items = list(service.find(query, max_results=500).sort("_id", 1))
if not items:
print("done.")
break
for item in items:
_id = get_id(item['_id'])
if _id == last_id:
print("error: processing {_id} again".format(_id=last_id))
raise ValueError("Invalid id.")
last_id = _id
updates = {}
for _, script in SCRIPTS:
script(item, updates)
if updates:
print("update", resource, _id)
try:
service.system_update(_id, updates, item)
except Exception as err:
print("error", err, "updates", updates)
else:
print("ignore", resource, _id)
robertwatsonbath/gr-specest-3.7 | python/specest_mtm.py | Python | gpl-3.0 | 4,472 | 0.006485 | #!/usr/bin/env python
#
# Copyright 2010 Communications Engineering Lab, KIT
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks, fft
import specest_gendpss
import specest_swig
## Estimates PSD using Thomson's multitaper method
# @param[in] N: Length of the FFT
# @param[in] NW: Time Bandwidth Product usually is of value 2, 2.5, 3.0, 3.5, or 4
# @param[in] K: Numbers of Tapers to use. K should be smaller than 2*NW
# @param[in] weighting: Which type of weighting to use for the eigenspectra. Choices can be 'unity', 'eigenvalues' or 'adaptive'
class mtm(gr.hier_block2):
""" Esti | mates PSD using Thomson's multitaper method. """
def __init__(self, N=512 , NW=3 , K=5, weighting='adaptive', fftshift=False):
gr.hier_block2.__init__(self, "mtm",
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(1, 1, gr.sizeof_float*N))
self.check_parameters(N, NW, K)
self.s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, N)
self.connect(self, self.s2v)
dpss = specest_gendpss.gendpss(N=N, NW=NW, K=K)
self.mtm = [eigenspectrum(dpss.dpssarray[i], fftshift) for i in xrange(K)]
if weighting == 'adaptive':
self.sum = specest_swig.adaptiveweighting_vff(N, dpss.lambdas)
self.connect_mtm(K)
self.connect(self.sum, self)
elif weighting == 'unity':
self.sum = blocks.add_ff(N)
self.divide = blocks.multiply_const_vff([1./K]*N)
self.connect_mtm(K)
self.connect(self.sum, self.divide, self)
elif weighting == 'eigenvalues':
self.eigvalmulti = []
self.lambdasum = 0
for i in xrange(K):
self.eigvalmulti.append(blocks.multiply_const_vff([dpss.lambdas[i]]*N))
self.lambdasum += dpss.lambdas[i]
self.divide = blocks.multiply_const_vff([1./self.lambdasum]*N)
self.sum = blocks.add_ff(N)
self.connect_mtm(K)
self.connect(self.sum, self.divide, self)
else:
raise ValueError, 'weighting-type should be: adaptive, unity or eigenvalues'
def connect_mtm(self, K):
""" Connects up all the eigenspectrum calculators. """
for i in xrange(K):
self.connect(self.s2v, self.mtm[i])
self.connect(self.mtm[i], (self.sum, i))
## Checks the validity of parameters
# @param[in] N: Length of the FFT
# @param[in] NW: Time Bandwidth Product
# @param[in] K: Numbers of Tapers to used
def check_parameters(self, N, NW, K):
""" Checks the validity of parameters. """
if NW < 1: raise ValueError, 'NW must be greater than or equal to 1'
if K < 2: raise ValueError, 'K must be greater than or equal to 2'
if (N % 1): raise TypeError, 'N has to be an integer'
if N < 1: raise ValueError, 'N has to be greater than 1'
## Computes the eigenspectra for the multitaper spectrum estimator:
# data ----> multiplication dpss ----> FFT ----> square ----> output eigenspectrum
# @param[in] dpss: the dpss used as a data taper
class eigenspectrum(gr.hier_block2):
""" Computes the eigenspectra for the multitaper spectrum estimator:
data --> multiplication dpss --> FFT --> mag-square --> output eigenspectrum """
def __init__(self, dpss, fftshift=False):
gr.hier_block2.__init__(self, "eigenspectrum",
gr.io_signature(1, 1, gr.sizeof_gr_complex*len(dpss)),
gr.io_signature(1, 1, gr.sizeof_float*len(dpss)))
self.window = dpss
self.fft = fft.fft_vcc(len(dpss), True, self.window, fftshift)
self.c2mag = blocks.complex_to_mag_squared(len(dpss))
self.connect(self, self.fft, self.c2mag, self)
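# A minimal flowgraph sketch (assumptions: GNU Radio 3.7 with gr-analog
# available; everything outside this module is illustrative):
#
#     from gnuradio import gr, analog, blocks
#     tb = gr.top_block()
#     src = analog.sig_source_c(32000, analog.GR_COS_WAVE, 1000, 1.0)
#     head = blocks.head(gr.sizeof_gr_complex, 512 * 100)
#     est = mtm(N=512, NW=3, K=5, weighting='adaptive')
#     snk = blocks.vector_sink_f(512)
#     tb.connect(src, head, est, snk)
#     tb.run()  # snk then holds 512-bin PSD vectors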
AnselCmy/ARPS | report_crawler/report_crawler/spiders/spiders_001/_H/HNU001.py | Python | mit | 1,423 | 0.020334 | # -*- coding:utf-8 -*-
import scrapy
from report_crawler.spiders.__Global_function import get_localtime
from report_crawler.spiders.__Global_variable import now_time, end_time
class HNU001_Spider(scrapy.Spider):
name = 'HNU001'
start_urls = ['http://csee.hnu.edu.cn/Front/TZXX_List?LMXX_BH=20130728174138ec48068e-48bf-49a6-ac51-27d04a9b1baa']
domain = 'http://csee.hnu.edu.cn/'
def parse(self, response):
messages = response.xpath("//ul[@class='article-list']/li")
for i, message in enumerate(messages):
report_name = message.xpath(".//a/text()").extract()[0]
report_time = get_localtime(message.xpath("span/text()").extract()[0].strip().strip("[]"))
if report_time > end_time:
continue
if report_time < now_time:
return
report_url = self.domain + message.xpath(".//a/@href").extract()[0][1:]
yield scrapy.Request(report_url, callback=self.parse_pages,
meta={'link': report_url, 'number': i + 1, 'publication': report_time, 'title': report_name})
def parse_pages(self, response):
messages = response.xpath("//div[@class='content-1']")
return {'text': messages, 'number': response.meta['number'], 'organizer': u"湖南大学大学信息科学与工程学院",
'faculty': self.name, 'link': response.meta['link'], 'publication': response.meta['publication'],
'location': u"华中:湖南省-长沙市", 'title': response.meta['title']}
northern-bites/nao-man | noggin/playbook/Strategies.py | Python | gpl-3.0 | 4,793 | 0.005216 | from .. import NogginConstants
from . import PBConstants
from . import Formations
def sReady(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_READY)
Formations.fReady(team, workingPlay)
def sNoFieldPlayers(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_NO_FIELD_PLAYERS)
Formations.fNoFieldPlayers(team, workingPlay)
def sOneField(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_ONE_FIELD_PLAYER)
# no kickoff formation- would be identical to fOneField
# Formation for ball in our goal box
if shouldUseDubD(team):
Formations.fOneDubD(team, workingPlay)
elif useFinder(team):
Formations.fFinder(team, workingPlay)
else:
Formations.fOneField(team, workingPlay)
def sTwoField(team, workingPlay):
'''
This is our standard strategy. Based around the 2008.
'''
workingPlay.setStrategy(PBConstants.S_TWO_FIELD_PLAYERS)
# Kickoff Formations
if useKickoffFormation(team):
Formations.fKickoff(team, workingPlay)
# Formation for ball in our goal box
elif shouldUseDubD(team):
Formations.fTwoDubD(team, workingPlay)
# ball hasn't been seen by me or teammates in a while
elif useFinder(team):
Formations.fFinder(team, workingPlay)
else:
# Keep a defender and a chaser
Formations.fDefensiveTwoField(team, workingPlay)
def sThreeField(team, workingPlay):
'''
This is our pulled goalie strategy.
'''
workingPlay.setStrategy(PBConstants.S_THREE_FIELD_PLAYERS)
# Kickoff Formations
if useKickoffFormation(team):
Formations.fKickoff(team, workingPlay)
# Formation for ball in our goal box
elif shouldUseDubD(team):
Formations.fThreeDubD(team, workingPlay)
# ball hasn't been seen by me or teammates in a while
elif useFinder(team):
Formations.fFinder(team, workingPlay)
# Standard spread formation
else:
Formations.fThreeField(team, workingPlay)
def sTwoZone(team, workingPlay):
"""
We attempt to keep one robot forward and one back
They become chaser if the ball is closer to them
"""
sTwoField(team, workingPlay)
def sWin(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_WIN)
# Kickoff Formations
if useKickoffFormation(team):
Formations.fKickoff(team,workingPlay)
# Formation for ball in our goal box
elif shouldUseDubD(team):
Formations.fTwoDubD(team, workingPlay)
# ball hasn't been seen by me or teammates in a while
elif useFinder(team):
Formations.fFinder(team, workingPlay)
# Move the defender forward if the ball is close enough to opp goal, then become a middie
elif team.brain.ball.x > PBConstants.S_MIDDIE_DEFENDER_THRESH:
Formations.fNeutralDefenseTwoField(team, workingPlay)
else:
Formations.fDefensiveTwoField(team, workingPlay)
# Add strategies for testing various roles
def sTestDefender(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_DEFENDER)
Formations.fTestDefender(team, workingPlay)
def sTestOffender(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_OFFENDER)
Formations.fTestOffender(team, workingPlay)
def sTestMiddie(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_MIDDIE)
Formations.fTestMiddie(team, workingPlay)
def sTestChaser(team, workingPlay):
workingPlay.setStrategy(PBConstants.S_TEST_CHASER)
# Game Ready Setup
if team.brain.gameController.currentState == 'gameReady' or\
team.brain.gameController.currentState =='gameSet':
# team is kicking off
Formations.fReady(team, workingPlay)
else:
Formations.fTestChaser(team, workingPlay)
#not sure this is the best place for these yet...
def useKickoffFormation(team):
if (team.brain.gameController.timeSincePlay() <
PBConstants.KICKOFF_FORMATION_TIME):
return True
else:
return False
def useFinder(team):
if (PBConstants.USE_FINDER and
team.brain.ball.timeSinceSeen() >
PBConstants.FINDER_TIME_THRESH):
return True
else:
return False
def shouldUseDubD(team):
if not PBConstants.USE_DUB_D:
return False
ballY = team.brain.ball.y
ballX = team.brain.ball.x
goalie = team.teammates[0]
return (
( ballY > NogginConstants.MY_GOALBOX_BOTTOM_Y + 5. and
ballY < NogginConstants.MY_GOALBOX_TOP_Y - 5. and
ballX < NogginConstants.MY_GOALBOX_RIGHT_X - 5.) or
( ballY > NogginConstants.MY_GOALBOX_TOP_Y - 5. and
ballY < NogginConstants.MY_GOALBOX_BOTTOM_Y + 5. and
ballX < NogginConstants.MY_GOALBOX_RIGHT_X + 5. and
goalie.isTeammateRole(PBConstants.CHASER) )
)
sgabe/Enumerator | enumerator/lib/services/snmp.py | Python | mit | 2,455 | 0.002037 | #!/usr/bin/env python
"""
The SNMP module performs snmp-related
enumeration tasks.
@author: Gabor Seljan (gabor<at>seljan.hu)
@version: 1.0
"""
import sys
from ..config import Config
from ..process_manager import ProcessManager
from ..generic_service import GenericService
class SnmpEnumeration(GenericService, ProcessManager):
SERVICE_DEFINITION = 'service:snmp'
PROCESSES = [{
'command': 'nmap -sU -Pn -p %(port)s %(scan_mode)s \
--script=snmp-brute,snmp-interfaces,snmp-netstat,snmp-processes,snmp-sysdescr,snmp-win32-users,snmp-win32-services,snmp-win32-shares,snmp-win32-software \
-oN %(output_dir)s/%(host)s-snmp-%(port)s-standard.txt %(host)s',
'normal': '-T4',
'stealth': '-T2',
}, {
'command': 'onesixtyone -o %(output_dir)s/%(host)s-snmp-%(port)s-onesixtyone.txt %(host)s',
'normal': '',
'stealth': '',
}, {
'command': 'snmpwalk -c public -v1 %(host)s 1 > %(output_dir)s/%(host)s-snmp-%(port)s-snmpwalk.txt',
'normal': '',
'stealth': '',
}, {
'command': 'snmpcheck -t %(host)s > %(output_dir)s/%(host)s-snmp-%(port)s-snmpcheck.txt',
'normal': '',
'stealth': '',
}]
def scan(self, directory, service_parameters):
"""Iterates over PROCESSES and builds
the specific parameters required for
command line execution of each process.
@param directory: Directory path where
final command output will go.
@param service_parameters: Dictionary with
key:value pairs of service-related data.
"""
ip = service_parameters.get('ip')
port = service_parameters.get('port')
config = Config().snmp
print '[+] enumerating SNMP service on host %s port %s' % (ip, port)
for process in self.PROCESSES:
self.start_processes(process.get('command'), params={
'host': ip,
'port': port,
'output_dir': directory,
'scan_mode': process.get(config['mode']),
}, display_exception=False)
if __name__ == '__main__':
"""For testing purposes, this
module can be executed as a script.
Use the following syntax from the root
directory of enumerator:
python -m lib.services.snmp <ip> <port> <output directory>
"""
snmp = SnmpEnumeration()
snmp.scan(sys.argv[3], dict(ip=sys.argv[1], port=sys.argv[2]))
sontung/pick_a_number | sound/click_sound.py | Python | mit | 116 | 0 | import pygame
pygame.mixer.init()
sound_object = pygame.mixer.Sound("sound/beep1.ogg")
sound_object.set_volume(1)
HydrelioxGitHub/home-assistant | homeassistant/components/mysensors/sensor.py | Python | apache-2.0 | 2,939 | 0 | """Support for MySensors sensors."""
from homeassistant.components import mysensors
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
SENSORS = {
'V_TEMP': [None, 'mdi:thermometer'],
'V_HUM': ['%', 'mdi:water-percent'],
'V_DIMMER': ['%', 'mdi:percent'],
'V_LIGHT_LEVEL': ['%', 'white-balance-sunny'],
'V_DIRECTION': ['°', 'mdi:compass'],
'V_WEIGHT': ['kg', 'mdi:weight-kilogram'],
'V_DISTANCE': ['m', 'mdi:ruler'],
'V_IMPEDANCE': ['ohm', None],
'V_WATT': ['W', None],
'V_KWH': ['kWh', None],
'V_FLOW': ['m', None],
'V_VOLUME': ['m³', None],
'V_VOLTAGE': ['V', 'mdi:flash'],
'V_CURRENT': ['A', 'mdi:flash-auto'],
'V_PERCENTAGE': ['%', 'mdi:percent'],
'V_LEVEL': {
'S_SOUND': ['dB', 'mdi:volume-high'], 'S_VIBRATION': ['Hz', None],
'S_LIGHT_LEVEL': ['lx', 'white-balance-sunny']},
'V_ORP': ['mV', None],
'V_EC': ['μS/cm', None],
'V_VAR': ['var', None],
'V_VA': ['VA', None],
}
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the My | Sensors platform for sensors."""
mysensors.setup_mysensors_platform(
hass, DOMAIN, discovery_info, MySensorsSensor,
async_add_entities=async_add_entities)
class MySensorsSensor(mysensors.device.MySensorsEntity):
"""Representation of a MySensors Sensor child node."""
@property
def force_update(self):
"""Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
"""
return True
@property
def state(self):
"""Return the state of the device."""
return self._values.get(self.value_type)
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
_, icon = self._get_sensor_type()
return icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
set_req = self.gateway.const.SetReq
if (float(self.gateway.protocol_version) >= 1.5 and
set_req.V_UNIT_PREFIX in self._values):
return self._values[set_req.V_UNIT_PREFIX]
unit, _ = self._get_sensor_type()
return unit
def _get_sensor_type(self):
"""Return list with unit and icon of sensor type."""
pres = self.gateway.const.Presentation
set_req = self.gateway.const.SetReq
SENSORS[set_req.V_TEMP.name][0] = (
TEMP_CELSIUS if self.gateway.metric else TEMP_FAHRENHEIT)
sensor_type = SENSORS.get(set_req(self.value_type).name, [None, None])
if isinstance(sensor_type, dict):
sensor_type = sensor_type.get(
pres(self.child_type).name, [None, None])
return sensor_type
caktus/django-opendebates | opendebates/tests/test_admin.py | Python | apache-2.0 | 4,608 | 0.001085 | from functools import partial
from django.contrib.admin.sites import AdminSite
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from opendebates.admin import SubmissionAdmin
from opendebates.models import Submission
from opendebates.tests.factories import SubmissionFactory, UserFactory, DebateFactory
# Force the reverse() used here in the tests to always use the full
# urlconf, despite whatever machinations have taken place due to the
# DebateMiddleware.
old_reverse = reverse
reverse = partial(old_reverse, urlconf='opendebates.urls')
# mock objects to make the admin think we're superusers.
# mostly copied from
# https://github.com/django/django/blob/master/tests/modeladmin/tests.py#L23-L32
class MockRequest(object):
POST = {}
META = {}
scheme = 'http'
class MockSuperUser(object):
def has_perm(self, perm):
return True
def is_authenticated(self):
return True
class RemoveSubmissionsTest(TestCase):
def setUp(self):
self.site = AdminSite()
self.admin = SubmissionAdmin(Submission, self.site)
self.password = 'secretpassword'
self.user = UserFactory(password=self.password, is_staff=True, is_superuser=True)
assert self.client.login(username=self.user.username, password=self.password)
self.submission = SubmissionFactory()
self.queryset = Submission.objects.all()
self.changelist_url = reverse('admin:opendebates_submission_changelist')
def tearDown(self):
Site.objects.clear_cache()
def test_get(self):
"""
GETting the intermediate page should have specified text and the PK of
the chosen submissions.
"""
request = MockRequest()
request.user = MockSuperUser()
request.debate = DebateFactory()
rsp = self.admin.remove_submissions(request, self.queryset)
self.assertEqual(rsp.status_code, 200)
self.assertContains(rsp, 'remove the selected submissions?')
self.assertContains(rsp, self.submission.pk)
@patch('opendebates.admin.send_email')
def test_post(self, mock_send_email):
"""
POSTing the form should cause submissions to be removed and email to be
sent.
"""
data = {
'post': 'Yes',
'action': 'remove_submissions',
'_selected_action': [self.submission.pk, ]
}
rsp = self.client.post(self.changelist_url, data=data)
self.assertRedirects(rsp, self.changelist_url)
# Now submission should not be approved
submission = Submission.objects.get()
self.assertFalse(submission.approved)
# and 1 email should have been sent
self.assertEqual(mock_send_email.call_count, 1)
submission = Submission.objects.get(id=self.submission.pk)
self.assertIsNotNone(submission.moderated_at)
@patch('opendebates.admin.send_email')
def test_post_multiple(self, mock_send_email):
"POSTing multiple submissions works as well."
data = {
'post': 'Yes',
'action': 'remove_submissions',
'_selected_action': [SubmissionFactory().pk, SubmissionFactory().pk]
}
rsp = self.client.post(self.changelist_url, data=data)
self.assertRedirects(rsp, self.changelist_url)
removed_submissions = Submission.objects.filter(approved=False)
self.assertEqual(removed_submissions.count(), 2)
sub1, sub2 = removed_submissions
self.assertIsNotNone(sub1.moderated_at)
self.assertIsNotNone(sub2.moderated_at)
untouched_submission = Submission.objects.filter(approved=True)
        self.assertEqual(untouched_submission.count(), 1)
# and 2 emails have been sent
self.assertEqual(mock_send_email.call_count, 2)
@patch('opendebates.admin.send_email')
    def test_dont_send_email_if_already_unapproved(self, mock_send_email):
"If submission was already unapproved, don't bug the user again."
data = {
'post': 'Yes',
'action': 'remove_submissions',
'_selected_action': [SubmissionFactory(approved=False).pk]
}
rsp = self.client.post(self.changelist_url, data=data)
self.assertRedirects(rsp, self.changelist_url)
removed_submissions = Submission.objects.filter(approved=False)
self.assertEqual(removed_submissions.count(), 1)
# and ZERO emails have been sent
self.assertEqual(mock_send_email.call_count, 0)
|
craws/OpenAtlas-Python | openatlas/views/model.py | Python | gpl-2.0 | 9,096 | 0 | from typing import Any, Dict, Optional
from flask import g, render_template, url_for
from flask_babel import format_number, lazy_gettext as _
from flask_wtf import FlaskForm
from wtforms import (
BooleanField, IntegerField, SelectMultipleField, StringField, SubmitField,
widgets)
from wtforms.validators import InputRequired
from openatlas import app
from openatlas.forms.field import TableField
from openatlas.models.entity import Entity
from openatlas.models.network import Network
from openatlas.util.table import Table
from openatlas.util.util import link, required_group, uc_first
class LinkCheckForm(FlaskForm): # type: ignore
cidoc_domain = TableField('Domain', [InputRequired()])
cidoc_property = TableField('Property', [InputRequired()])
    cidoc_range = TableField('Range', [InputRequired()])
save = SubmitField(uc_first(_('test')))
@app.route('/overview/model', methods=["GET", "POST"])
@required_group('readonly')
def model_index() -> str:
form = LinkCheckForm()
form_classes = \
{code: f'{code} {class_.name}'
for code, class_ in g.cidoc_classes.items()}
form.cidoc_domain.choices = form_classes
    form.cidoc_range.choices = form_classes
form.cidoc_property.choices = {
code: f'{code} {property_.name}'
for code, property_ in g.properties.items()}
result = None
if form.validate_on_submit():
domain = g.cidoc_classes[form.cidoc_domain.data]
range_ = g.cidoc_classes[form.cidoc_range.data]
property_ = g.properties[form.cidoc_property.data]
result = {
'domain': domain,
'property': property_,
'range': range_,
'domain_valid': property_.find_object(
'domain_class_code',
domain.code),
'range_valid': property_.find_object(
'range_class_code',
range_.code)}
return render_template(
'model/index.html',
form=form,
result=result,
title=_('model'),
crumbs=[_('model')])
@app.route('/overview/model/class/<code>')
@required_group('readonly')
def class_entities(code: str) -> str:
table = Table(
['name'],
rows=[[link(entity)] for entity in Entity.get_by_cidoc_class(code)])
return render_template(
'table.html',
table=table,
title=_('model'),
crumbs=[
[_('model'), url_for('model_index')],
[_('classes'), url_for('class_index')],
link(g.cidoc_classes[code]),
_('entities')])
@app.route('/overview/model/class')
@required_group('readonly')
def class_index() -> str:
table = Table(
['code', 'name', 'count'],
defs=[
{'className': 'dt-body-right', 'targets': 2},
{'orderDataType': 'cidoc-model', 'targets': [0]},
{'sType': 'numeric', 'targets': [0]}])
for class_ in g.cidoc_classes.values():
count = ''
if class_.count:
count = format_number(class_.count)
if class_.code not in ['E53', 'E41', 'E82']:
count = link(
format_number(class_.count),
url_for('class_entities', code=class_.code))
table.rows.append([link(class_), class_.name, count])
return render_template(
'table.html',
table=table,
title=_('model'),
crumbs=[[_('model'), url_for('model_index')], _('classes')])
@app.route('/overview/model/property')
@required_group('readonly')
def property_index() -> str:
classes = g.cidoc_classes
properties = g.properties
table = Table(
[
'code', 'name', 'inverse', 'domain', 'domain name', 'range',
'range name', 'count'],
defs=[
{'className': 'dt-body-right', 'targets': 7},
{'orderDataType': 'cidoc-model', 'targets': [0, 3, 5]},
{'sType': 'numeric', 'targets': [0]}])
for property_ in properties.values():
table.rows.append([
link(property_),
property_.name,
property_.name_inverse,
link(classes[property_.domain_class_code]),
classes[property_.domain_class_code].name,
link(classes[property_.range_class_code]),
classes[property_.range_class_code].name,
format_number(property_.count) if property_.count else ''])
return render_template(
'table.html',
table=table,
title=_('model'),
crumbs=[[_('model'), url_for('model_index')], _('properties')])
@app.route('/overview/model/class_view/<code>')
@required_group('readonly')
def class_view(code: str) -> str:
class_ = g.cidoc_classes[code]
tables = {}
for table in ['super', 'sub']:
tables[table] = Table(paging=False, defs=[
{'orderDataType': 'cidoc-model', 'targets': [0]},
{'sType': 'numeric', 'targets': [0]}])
for code_ in getattr(class_, table):
tables[table].rows.append(
[link(g.cidoc_classes[code_]), g.cidoc_classes[code_].name])
tables['domains'] = Table(paging=False, defs=[
{'orderDataType': 'cidoc-model', 'targets': [0]},
{'sType': 'numeric', 'targets': [0]}])
tables['ranges'] = Table(paging=False, defs=[
{'orderDataType': 'cidoc-model', 'targets': [0]},
{'sType': 'numeric', 'targets': [0]}])
for property_ in g.properties.values():
if class_.code == property_.domain_class_code:
tables['domains'].rows.append([link(property_), property_.name])
elif class_.code == property_.range_class_code:
tables['ranges'].rows.append([link(property_), property_.name])
return render_template(
'model/class_view.html',
class_=class_,
tables=tables,
info={'code': class_.code, 'name': class_.name},
title=_('model'),
crumbs=[
[_('model'),
url_for('model_index')],
[_('classes'), url_for('class_index')],
class_.code])
@app.route('/overview/model/property_view/<code>')
@required_group('readonly')
def property_view(code: str) -> str:
property_ = g.properties[code]
domain = g.cidoc_classes[property_.domain_class_code]
range_ = g.cidoc_classes[property_.range_class_code]
info = {
'code': property_.code,
'name': property_.name,
'inverse': property_.name_inverse,
'domain': f'{link(domain)} {domain.name}',
'range': f'{link(range_)} {range_.name}'}
tables = {}
for table in ['super', 'sub']:
tables[table] = Table(paging=False, defs=[
{'orderDataType': 'cidoc-model', 'targets': [0]},
{'sType': 'numeric', 'targets': [0]}])
for code_ in getattr(property_, table):
tables[table].rows.append(
[link(g.properties[code_]), g.properties[code_].name])
return render_template(
'model/property_view.html',
tables=tables,
property_=property_,
info=info,
title=_('model'),
crumbs=[
[_('model'), url_for('model_index')],
[_('properties'), url_for('property_index')],
property_.code])
class NetworkForm(FlaskForm): # type: ignore
width = IntegerField(default=1200, validators=[InputRequired()])
height = IntegerField(default=600, validators=[InputRequired()])
charge = StringField(default=-80, validators=[InputRequired()])
distance = IntegerField(default=80, validators=[InputRequired()])
orphans = BooleanField(default=False)
classes = SelectMultipleField(
_('classes'),
widget=widgets.ListWidget(prefix_label=False))
@app.route('/overview/network/', methods=["GET", "POST"])
@app.route('/overview/network/<int:dimensions>', methods=["GET", "POST"])
@required_group('readonly')
def model_network(dimensions: Optional[int] = None) -> str:
network_classes = [class_ for class_ in g.classes.values() if class_.color]
for class_ in network_classes:
setattr(NetworkForm, class_.name, StringField(
default=class_.color,
render_kw={'da |
tuanchien/tascc | modules/ReadSensorsOwfs.py | Python | gpl-2.0 | 796 | 0.016332 | # ReadSensorsOwfs.py
###########################################################
# Class handles loading of thermometer sensor information #
# from the interface exposed by the owfs fuse #
# module. #
###########################################################
class ReadSensorsOwfs:
def __init__(self, sensors, sensorData, owfsPath):
self.sensors = sensors
self.sensorData = sensorData
self.directory = owfsPath
def refreshSensors(self):
for sensor in self.sensors:
self.updateSensor(sensor)
def updateSensor(self, sensor):
sensorFile = open('%s/%s/temperature' % (self.directory,sensor), 'r')
temperature = sensorFile.readline()
sensorFile.close()
        self.sensorData[sensor] = str(int(float(temperature)*1000))
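
# The string-to-millidegree conversion used by updateSensor(), shown
# standalone with a made-up reading: owfs exposes temperatures as decimal
# text, and int() truncates any sub-millidegree precision.
_raw_reading = '21.5625'
assert str(int(float(_raw_reading) * 1000)) == '21562'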
|
joachimmetz/plaso | plaso/parsers/sqlite_plugins/firefox_history.py | Python | apache-2.0 | 19,252 | 0.003324 | # -*- coding: utf-8 -*-
"""SQLite parser plugin for Mozilla Firefox history database files."""
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class FirefoxPlacesBookmarkAnnotationEventData(events.EventData):
"""Firefox bookmark annotation event data.
Attributes:
content (str): annotation content.
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
title (str): title of the bookmark folder.
url (str): bookmarked URL.
"""
DATA_TYPE = 'firefox:places:bookmark_annotation'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkAnnotationEventData, self).__init__(
data_type=self.DATA_TYPE)
self.content = None
self.offset = None
self.query = None
self.title = None
self.url = None
class FirefoxPlacesBookmarkFolderEventData(events.EventData):
"""Firefox bookmark folder event data.
Attributes:
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
title (str): title of the bookmark folder.
"""
DATA_TYPE = 'firefox:places:bookmark_folder'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkFolderEventData, self).__init__(
data_type=self.DATA_TYPE)
self.offset = None
self.query = None
self.title = None
class FirefoxPlacesBookmarkEventData(events.EventData):
"""Firefox bookmark event data.
Attributes:
host (str): visited hostname.
offset (str): identifier of the row, from which the event data was
extracted.
places_title (str): places title.
query (str): SQL query that was used to obtain the event data.
title (str): title of the bookmark folder.
type (int): bookmark type.
url (str): bookmarked URL.
visit_count (int): visit count.
"""
DATA_TYPE = 'firefox:places:bookmark'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesBookmarkEventData, self).__init__(
data_type=self.DATA_TYPE)
self.host = None
self.offset = None
self.places_title = None
self.query = None
self.title = None
self.type = None
self.url = None
self.visit_count = None
class FirefoxPlacesPageVisitedEventData(events.EventData):
"""Firefox page visited event data.
Attributes:
from_visit (str): URL that referred to the visited page.
hidden (str): value to indicated if the URL was hidden.
host (str): visited hostname.
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
title (str): title of the visited page.
typed (str): value to indicated if the URL was typed.
    url (str): URL of the visited page.
visit_count (int): visit count.
visit_type (str): transition type for the event.
"""
DATA_TYPE = 'firefox:places:page_visited'
def __init__(self):
"""Initializes event data."""
super(FirefoxPlacesPageVisitedEventData, self).__init__(
data_type=self.DATA_TYPE)
self.from_visit = None
self.hidden = None
self.host = None
self.offset = None
self.query = None
self.title = None
self.typed = None
self.url = None
self.visit_count = None
self.visit_type = None
class FirefoxHistoryPlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Mozilla Firefox history database files.
The Mozilla Firefox history database file is typically stored in:
places.sqlite
"""
NAME = 'firefox_history'
DATA_FORMAT = 'Mozilla Firefox history SQLite database (places.sqlite) file'
REQUIRED_STRUCTURE = {
'moz_places': frozenset([
'url', 'title', 'visit_count', 'rev_host', 'hidden', 'typed', 'id']),
'moz_historyvisits': frozenset([
'id', 'visit_date', 'from_visit', 'visit_type', 'place_id']),
'moz_bookmarks': frozenset([
'type', 'title', 'dateAdded', 'lastModified', 'id', 'fk']),
'moz_items_annos': frozenset([
'content', 'dateAdded', 'lastModified', 'id', 'item_id'])}
QUERIES = [
(('SELECT moz_historyvisits.id, moz_places.url, moz_places.title, '
'moz_places.visit_count, moz_historyvisits.visit_date, '
'moz_historyvisits.from_visit, moz_places.rev_host, '
'moz_places.hidden, moz_places.typed, moz_historyvisits.visit_type '
'FROM moz_places, moz_historyvisits '
'WHERE moz_places.id = moz_historyvisits.place_id'),
'ParsePageVisitedRow'),
(('SELECT moz_bookmarks.type, moz_bookmarks.title AS bookmark_title, '
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified, '
'moz_places.url, moz_places.title AS places_title, '
'moz_places.rev_host, moz_places.visit_count, moz_bookmarks.id '
'FROM moz_places, moz_bookmarks '
'WHERE moz_bookmarks.fk = moz_places.id AND moz_bookmarks.type <> 3'),
'ParseBookmarkRow'),
(('SELECT moz_items_annos.content, moz_items_annos.dateAdded, '
'moz_items_annos.lastModified, moz_bookmarks.title, '
'moz_places.url, moz_places.rev_host, moz_items_annos.id '
'FROM moz_items_annos, moz_bookmarks, moz_places '
'WHERE moz_items_annos.item_id = moz_bookmarks.id '
'AND moz_bookmarks.fk = moz_places.id'),
'ParseBookmarkAnnotationRow'),
(('SELECT moz_bookmarks.id, moz_bookmarks.title,'
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified '
'FROM moz_bookmarks WHERE moz_bookmarks.type = 2'),
'ParseBookmarkFolderRow')]
_SCHEMA_V24 = {
'moz_anno_attributes': (
'CREATE TABLE moz_anno_attributes ( id INTEGER PRIMARY KEY, name '
'VARCHAR(32) UNIQUE NOT NULL)'),
'moz_annos': (
'CREATE TABLE moz_annos ( id INTEGER PRIMARY KEY, place_id INTEGER '
'NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) DEFAULT '
'NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, expiration '
'INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded INTEGER '
'DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_bookmarks': (
'CREATE TABLE moz_bookmarks ( id INTEGER PRIMARY KEY, type INTEGER, '
'fk INTEGER DEFAULT NULL, parent INTEGER, position INTEGER, title '
'LONGVARCHAR, keyword_id INTEGER, folder_type TEXT, dateAdded '
'INTEGER, lastModified INTEGER)'),
'moz_bookmarks_roots': (
'CREATE TABLE moz_bookmarks_roots ( root_name VARCHAR(16) UNIQUE, '
'folder_id INTEGER)'),
'moz_favicons': (
'CREATE TABLE moz_favicons ( id INTEGER PRIMARY KEY, url '
'LONGVARCHAR UNIQUE, data BLOB, mime_type VARCHAR(32), expiration '
'LONG)'),
'moz_historyvisits': (
'CREATE TABLE moz_historyvisits ( id INTEGER PRIMARY KEY, '
'from_visit INTEGER, place_id INTEGER, visit_date INTEGER, '
'visit_type INTEGER, session INTEGER)'),
'moz_inputhistory': (
'CREATE TABLE moz_inputhistory ( place_id INTEGER NOT NULL, input '
'LONGVARCHAR NOT NULL, use_count INTEGER, PRIMARY KEY (place_id, '
'input))'),
'moz_items_annos': (
'CREATE TABLE moz_items_annos ( id INTEGER PRIMARY KEY, item_id '
'INTEGER NOT NULL, anno_attribute_id INTEGER, mime_type VARCHAR(32) '
'DEFAULT NULL, content LONGVARCHAR, flags INTEGER DEFAULT 0, '
'expiration INTEGER DEFAULT 0, type INTEGER DEFAULT 0, dateAdded '
'INTEGER DEFAULT 0, lastModified INTEGER DEFAULT 0)'),
'moz_keywords': (
'CREATE TABLE moz_keywords ( id INTEGER PRIMARY KEY AUTOINCREMENT, '
'keyword TEXT UNIQUE)'),
'moz_places': (
'CREATE TABLE moz_places ( id INTEGER PRIMARY KEY, url LONGVARCHAR, '
'title LONGVARCHAR, |
Oisota/Breakout | breakout/editor/editor.py | Python | gpl-3.0 | 3,858 | 0.005962 | """
Editor Module
This module defines the Editor class. This module runs
the game's level editor.
"""
import sys, os
import tkinter as tk
from tkinter.filedialog import askopenfilename, asksaveasfilename
from .. import asset
from ..config import START_LEVEL, LEVEL_PATH
from .brick import BrickFrame
from .entry import EntryFrame
class Editor(tk.Frame):
"""Main level editor frame."""
def __init__(self, parent):
"""Initialize the editor."""
tk.Frame.__init__(self, parent)
self.parent = parent
self.level_filename = START_LEVEL
self.parent.title('Breakout Editor - ' + self.level_filename)
self.level = asset.load_level(self.level_filename)
self.grid()
self.create_widgets()
self.grid_widgets()
def create_widgets(self):
"""Create editor frame widgets."""
#create file menu
self.menu = tk.Menu(self.parent)
self.parent.config(menu=self.menu)
self.file_menu = tk.Menu(self.menu)
self.file_menu.add_command(label='New', command=self.new_level)
self.file_menu.add_command(label='Open', command=self.open_level)
self.file_menu.add_separator()
self.file_menu.add_command(label='Save', command=self.save_level)
self.file_menu.add_command(label='Save as', command=self.save_level_as)
self.file_menu.add_separator()
self.file_menu.add_command(label='Exit', command=self.quit)
self.menu.add_cascade(label='File', menu=self.file_menu)
#create input boxes
self.entry_frame = EntryFrame(self, self.level)
#create brick button grid
self.brick_frame = BrickFrame(self, self.level['bricks'], self.entry_frame.color_option)
def grid_widgets(self):
"""position widgets in the frame."""
self.brick_frame.grid(row=0, column=0)
self.brick_frame['pady'] = 35
self.brick_frame['padx'] = 10
self.entry_frame.grid(row=1, column=0)
def save_level(self):
"""Save the level under the current level name."""
if self.level_filename == 'untitled.json':
            self.save_level_as()
else:
level = {
'name': self.entry_frame.level_name.get(),
'ball_speed': self.entry_frame.ball_speed.get(),
'next': self.entry_frame.next_level.get(),
'bricks': self.brick_frame.bricks
}
asset.save_level(level, self.level_filename)
def save_level_as(self):
"""Save the level file."""
        filename = asksaveasfilename(initialdir=LEVEL_PATH, initialfile=self.level_filename)
if filename != '':
self.level_filename = filename
level = {
'name': self.entry_frame.level_name.get(),
                'ball_speed': self.entry_frame.ball_speed.get(),
'next': self.entry_frame.next_level.get(),
'bricks': self.brick_frame.bricks
}
asset.save_level(level, self.level_filename)
def open_level(self):
"""Open the level file."""
filename = askopenfilename(initialdir=LEVEL_PATH, initialfile=self.level_filename)
if filename != '':
self.level_filename = filename
self.parent.title('Breakout Editor - ' + os.path.basename(self.level_filename))
self.level = asset.load_level(os.path.basename(self.level_filename))
self.brick_frame.update(self.level['bricks'])
self.entry_frame.update(self.level)
def new_level(self):
"""Create a blank level."""
pass
def quit(self):
"""Exit the editor."""
sys.exit()
def run():
"""Run the Editor."""
root = tk.Tk()
root.geometry('800x600')
editor = Editor(root)
editor.mainloop()
|
ChaseSnapshot/smcity | smcity/models/aws/aws_data.py | Python | unlicense | 6,158 | 0.006983 | ''' Data model and factory implementations that are backed by Amazon Web Service's DynamoDB2 NoSQL database '''
from boto.dynamodb2.table import Table
from time import strftime, strptime
from smcity.misc.errors import CreateError, ReadError
from smcity.misc.logger import Logger
from smcity.models.data import Data, DataFactory
logger = Logger(__name__)
class AwsData(Data):
''' AWS specific implementation of the Data model. '''
def __init__(self, record):
'''
Constructor.
@param record Database record corresponding to this peice of Data.
@paramType dictionary(dynamodb2.Item)
@returns n/a
'''
assert record is not None
self.record = record
def get_content(self):
''' {@inheritDocs} '''
return self.record['content']
def get_datum_id(self):
''' {@inheritDocs} '''
return self.record['datum_id']
def get_location(self):
''' {@inheritDocs} '''
lat = float(self.record['lat']) / 10000000
lon = float(self.record['lon']) / 10000000
return (lon, lat)
def get_set_id(self):
''' {@inheritDocs} '''
return self.record['set_id']
def get_timestamp(self):
''' {@inheritDocs} '''
return strptime(self.record['timestamp'], '%Y-%m-%d %H:%M:%S')
def get_type(self):
''' {@inheritDocs} '''
return self.record['type']
class AwsDataFactory(DataFactory):
def __init__(self, config):
'''
Constructor.
        @param config Configuration settings. Expected definition:
Section: database
Key: data_table
Type: string
Desc: Name of the Data model table
@paramType ConfigParser
@returns n/a
'''
self.global_table = Table(config.get('database', 'global_data_table'))
self.set_table = Table(config.get('database', 'set_data_table'))
def create_data(self, content, datum_id, location, set_id, timestamp, type):
''' {@inheritDocs} '''
assert content is not None
assert datum_id is not None
assert -180 <= location[0] and location[0] < 180, location[0]
assert -90 <= location[1] and location[1] < 90, location[1]
assert set_id is not None
assert timestamp is not None
assert type is not None
# Normalize the values
lat_norm = int(location[1] * 10000000)
lon_norm = int(location[0] * 10000000)
timestamp_norm = strftime('%Y-%m-%d %H:%M:%S', timestamp)
# Create the database record
data = {
'content' : content,
'datum_id' : datum_id,
'lat' : lat_norm,
'lat_copy' : lat_norm,
'lon' : lon_norm,
'lon_copy' : lon_norm,
'set_id' : set_id,
'timestamp' : timestamp_norm,
'timestamp_copy' : timestamp_norm,
'type' : type
}
result = False
if set_id == 'global': # If this is a global data point
result = self.global_table.put_item(data=data)
else: # If this is a set data point
result = self.set_table.put_item(data=data)
# If we failed to create the database record
if result is False:
raise CreateError("Failed to create the Data(" + str(data) + ")!")
def copy_data(self, set_id, datas):
''' {@inheritDocs} '''
assert set_id is not None
with self.set_table.batch_write() as batch:
for data in datas:
batch.put_item(data = {
'content' : data.get_content(),
'datum_id' : data.get_datum_id(),
'lat' : data.record['lat'],
'lat_copy' : data.record['lat_copy'],
'lon' : data.record['lon'],
'lon_copy' : data.record['lon_copy'],
'set_id' : set_id,
'timestamp' : data.record['timestamp'],
'timestamp_copy' : data.record['timestamp_copy'],
'type' : data.record['type']
})
def filter_global_data(self, min_timestamp=None, max_timestamp=None,
min_lat=None, max_lat=None,
min_lon=None, max_lon=None,
segment_id=0, num_segments=1,
type=None
):
''' {@inheritDocs} '''
kwargs = {}
if min_timestamp is not None:
kwargs['timestamp__gte'] = strftime('%Y-%m-%d %H:%M:%S', min_timestamp)
if max_timestamp is not None:
kwargs['timestamp_copy__lte'] = strftime('%Y-%m-%d %H:%M:%S', max_timestamp)
if min_lat is not None:
kwargs['lat__gte'] = int(min_lat * 10000000)
if max_lat is not None:
kwargs['lat_copy__lte'] = int(max_lat * 10000000)
if min_lon is not None:
kwargs['lon__gte'] = int(min_lon * 10000000)
if max_lon is not None:
kwargs['lon_copy__lte'] = int(max_lon * 10000000)
if type is not None:
kwargs['type__eq'] = type
kwargs['set_id__eq'] = 'global'
kwargs['segment'] = segment_id
kwargs['total_segments'] = num_segments
logger.debug("Scan Args: %s", kwargs)
return AwsDataIterator(self.global_table.scan(**kwargs))
def get_data_set(self, set_id):
''' {@inheritDocs} '''
return AwsDataIterator(self.set_table.query(set_id__eq=set_id))
class AwsDataIterator():
''' AWS specific implementation of the Data result set iterator. '''
def __init__(self, result_set):
'''
Constructor.
@param result_set DynamoDB2 ResultSet to wrap.
@param boto.dynamodb2.ResultSet
@returns n/a
'''
assert result_set is not None, "result_set must not be None!"
self.result_set = result_set
def __iter__(self):
return self
def next(self):
return AwsData(self.result_set.next())
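
# A small sketch of the fixed-point coordinate scheme used by AwsData and
# AwsDataFactory above: coordinates are scaled by 1e7 and stored as DynamoDB
# integers. The demo value is made up and chosen to be an exact binary
# fraction; for arbitrary decimals, int() truncation can shift the last
# digit by one, which is why ~seven decimal places is the usable precision.
def _demo_to_fixed(coord):
    return int(coord * 10000000)  # matches the normalisation in create_data()

def _demo_from_fixed(value):
    return float(value) / 10000000  # matches get_location()

assert _demo_from_fixed(_demo_to_fixed(36.25)) == 36.25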
|
oblique-labs/pyVM | rpython/jit/backend/llsupport/test/ztranslation_test.py | Python | mit | 11,930 | 0.004946 | import os, sys, py
from rpython.tool.udir import udir
from rpython.rlib.jit import JitDriver, unroll_parameters, set_param
from rpython.rlib.jit import PARAMETERS, dont_look_inside
from rpython.rlib.jit import promote, _get_virtualizable_token
from rpython.rlib import jit_hooks, rposix, rgc
from rpython.rlib.objectmodel import keepalive_until_here
from rpython.rlib.rthread import ThreadLocalReference, ThreadLocalField
from rpython.jit.backend.detect_cpu import getcpuclass
from rpython.jit.backend.test.support import CCompiledMixin
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.config.config import ConfigError
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rlib.rjitlog import rjitlog as jl
class TranslationTest(CCompiledMixin):
CPUClass = getcpuclass()
def test_stuff_translates(self):
# this is a basic test that tries to hit a number of features and their
# translation:
# - jitting of loops and bridges
# - two virtualizable types
# - set_param interface
# - profiler
# - full optimizer
# - floats neg and abs
# - cast_int_to_float
# - llexternal with macro=True
# - extra place for the zero after STR instances
class BasicFrame(object):
_virtualizable_ = ['i']
def __init__(self, i):
self.i = i
class Frame(BasicFrame):
pass
eci = ExternalCompilationInfo(post_include_bits=['''
#define pypy_my_fabs(x) fabs(x)
'''], includes=['math.h'])
myabs1 = rffi.llexternal('pypy_my_fabs', [lltype.Float],
lltype.Float, macro=True, releasegil=False,
compilation_info=eci)
myabs2 = rffi.llexternal('pypy_my_fabs', [lltype.Float],
lltype.Float, macro=True, releasegil=True,
compilation_info=eci)
@jl.returns(jl.MP_FILENAME,
jl.MP_LINENO,
jl.MP_INDEX)
def get_location():
return ("/home.py",0,0)
jitdriver = JitDriver(greens = [],
reds = ['total', 'frame', 'prev_s', 'j'],
virtualizables = ['frame'],
get_location = get_location)
def f(i, j):
for param, _ in unroll_parameters:
defl = PARAMETERS[param]
set_param(jitdriver, param, defl)
set_param(jitdriver, "threshold", 3)
set_param(jitdriver, "trace_eagerness", 2)
total = 0
frame = Frame(i)
j = float(j)
prev_s = rstr.mallocstr(16)
while frame.i > 3:
jitdriver.can_enter_jit(frame=frame, total=total, j=j,
prev_s=prev_s)
jitdriver.jit_merge_point(frame=frame, total=total, j=j,
prev_s=prev_s)
_get_virtualizable_token(frame)
total += frame.i
if frame.i >= 20:
frame.i -= 2
frame.i -= 1
j *= -0.712
if j + (-j): raise ValueError
j += frame.i
k = myabs1(myabs2(j))
if k - abs(j): raise ValueError
if k - abs(-j): raise ValueError
s = rstr.mallocstr(16)
rgc.ll_write_final_null_char(s)
rgc.ll_write_final_null_char(prev_s)
if (frame.i & 3) == 0:
prev_s = s
return chr(total % 253)
#
class Virt2(object):
            _virtualizable_ = ['i']
def __init__(self, i):
self.i = i
from rpython.rlib.libffi import types, CDLL, ArgChain
from rpython.rlib.test.test_clibffi import get_libm_name
libm_name = get_libm_name(sys.platform)
jitdriver2 = JitDriver(greens=[], reds = ['v2', 'func', 'res', 'x'],
virtualizables = ['v2'])
def libffi_stuff(i, j):
lib = CDLL(libm_name)
            func = lib.getpointer('fabs', [types.double], types.double)
res = 0.0
x = float(j)
v2 = Virt2(i)
while v2.i > 0:
jitdriver2.jit_merge_point(v2=v2, res=res, func=func, x=x)
promote(func)
argchain = ArgChain()
argchain.arg(x)
res = func.call(argchain, rffi.DOUBLE)
v2.i -= 1
return res
#
def main(i, j):
a_char = f(i, j)
a_float = libffi_stuff(i, j)
return ord(a_char) * 10 + int(a_float)
expected = main(40, -49)
res = self.meta_interp(main, [40, -49])
assert res == expected
class TranslationTestCallAssembler(CCompiledMixin):
CPUClass = getcpuclass()
def test_direct_assembler_call_translates(self):
"""Test CALL_ASSEMBLER and the recursion limit"""
# - also tests threadlocalref_get
from rpython.rlib.rstackovf import StackOverflow
class Thing(object):
def __init__(self, val):
self.val = val
class Frame(object):
_virtualizable_ = ['thing']
driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
virtualizables = ['frame'],
get_printable_location = lambda codeno: str(codeno))
class SomewhereElse(object):
pass
somewhere_else = SomewhereElse()
class Foo(object):
pass
t = ThreadLocalReference(Foo, loop_invariant=True)
tf = ThreadLocalField(lltype.Char, "test_call_assembler_")
def change(newthing):
somewhere_else.frame.thing = newthing
def main(codeno):
frame = Frame()
somewhere_else.frame = frame
frame.thing = Thing(0)
portal(codeno, frame)
return frame.thing.val
def portal(codeno, frame):
i = 0
while i < 10:
driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
nextval = frame.thing.val
if codeno == 0:
subframe = Frame()
subframe.thing = Thing(nextval)
nextval = portal(1, subframe)
elif frame.thing.val > 40:
change(Thing(13))
nextval = 13
frame.thing = Thing(nextval + 1)
i += 1
if t.get().nine != 9: raise ValueError
if ord(tf.getraw()) != 0x92: raise ValueError
return frame.thing.val
driver2 = JitDriver(greens = [], reds = ['n'])
def main2(bound):
try:
while portal2(bound) == -bound+1:
bound *= 2
except StackOverflow:
pass
return bound
def portal2(n):
while True:
driver2.jit_merge_point(n=n)
n -= 1
if n <= 0:
return n
n = portal2(n)
assert portal2(10) == -9
def setup(value):
foo = Foo()
foo.nine = value
t.set(foo)
tf.setraw("\x92")
return foo
def mainall(codeno, bound):
foo = setup(bound + 8)
result = main(codeno) + main2(bound)
keepalive_until_here(foo)
return result
tmp_obj = setup(9)
expected_1 = main(0)
res = self.meta_interp(mainall, [0, 1], inline=True,
policy=StopAtXPolicy(change))
print hex(res)
assert res & 255 == expected_1
bound = res & ~255
assert 1024 <= bound <= 131072
assert bound & (bound-1) == 0 |
sbellver/rdiffweb | rdiffweb/page_prefs.py | Python | gpl-3.0 | 5,423 | 0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# rdiffweb, A web interface to rdiff-backup repositories
# Copyright (C) 2014 rdiffweb contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import cherrypy
import logging
import page_main
import rdw_spider_repos
import email_notification
# Define the logger
logger = logging.getLogger(__name__)
class rdiffPreferencesPage(page_main.rdiffPage):
sampleEmail = 'joe@example.com'
@cherrypy.expose
def index(self, action=u"", current=u"", new=u"", confirm=u""):
params = {}
# Process the parameters.
if self._is_submit():
try:
if action == "set_password":
params = self._set_password(current, new, confirm)
elif action == "update_repos":
params = self._update_repos()
elif action == 'set_notifications':
                    params = self._setNotifications(cherrypy.request.params)
except ValueError as e:
params['error'] = unicode(e)
except Exception as e:
logger.exception("unknown error processing action")
params['error'] = unicode(e)
# Get page params
try:
params.update(self._get_parms_for_page())
except Exception as e:
params['error'] = unicode(e)
return self._writePage("prefs.html", **params)
def _set_password(self, old_password, new_password, confirm_password):
# Check if current database support it.
if not self.getUserDB().is_modifiable():
return {'error': """Password changing is not
supported with the active user
database."""}
# Check if confirmation is valid.
if new_password != confirm_password:
return {'error': "The passwords do not match."}
self.getUserDB().set_password(self.getUsername(),
old_password,
new_password)
return {'success': "Password updated successfully."}
def _update_repos(self):
rdw_spider_repos.findReposForUser(self.getUsername(), self.getUserDB())
return {'success': """Successfully updated repositories."""}
def _setNotifications(self, parms):
if not self.getUserDB().is_modifiable():
            return {'error': """Email notification is not
                    supported with the active user
                    database."""}
repos = self.getUserDB().get_repos(self.getUsername())
for parmName in parms.keys():
if parmName == "userEmail":
if parms[parmName] == self.sampleEmail:
parms[parmName] = ''
self.getUserDB().set_email(
self.getUsername(), parms[parmName])
if parmName.endswith("numDays"):
backupName = parmName[:-7]
if backupName in repos:
if parms[parmName] == "Don't notify":
maxDays = 0
else:
maxDays = int(parms[parmName][0])
self.getUserDB().set_repo_maxage(
self.getUsername(), backupName, maxDays)
        return {'success': """Successfully changed
                notification settings."""}
def _get_parms_for_page(self):
email = self.getUserDB().get_email(self.getUsername())
        parms = {
"userEmail": email,
"notificationsEnabled": False,
"backups": [],
"sampleEmail": self.sampleEmail
}
        if email_notification.emailNotifier().notificationsEnabled():
repos = self.getUserDB().get_repos(self.getUsername())
backups = []
for repo in repos:
maxAge = self.getUserDB().get_repo_maxage(
self.getUsername(), repo)
notifyOptions = []
for i in range(0, 8):
notifyStr = "Don't notify"
if i == 1:
notifyStr = "1 day"
elif i > 1:
notifyStr = str(i) + " days"
selectedStr = ""
if i == maxAge:
selectedStr = "selected"
notifyOptions.append(
{"optionStr": notifyStr, "selectedStr": selectedStr})
backups.append(
{"backupName": repo, "notifyOptions": notifyOptions})
parms.update({"notificationsEnabled": True, "backups": backups})
return parms
|
CharlieCorner/pymage_downloader | argparsers.py | Python | apache-2.0 | 5,447 | 0.00257 | from argparse import ArgumentParser
VERSION = "2.0.0"
def parse_args():
"""Parse args with argparse
:returns: args
"""
parser = ArgumentParser(description=f"Pymage Downloader {VERSION} - Download pics from different sites")
build_site_subparsers(parser)
parser.add_argument('--destination', '-d',
dest='folder',
default='pymage_pics',
help="Defines a download folder.")
parser.add_argument("--overwrite", "-o",
dest="should_overwrite",
action="store_true",
help="Specifies if files should be overwritten if they were already downloaded.")
parser.add_argument("--debug",
dest="is_debug",
action="store_true",
help="Activates debug mode.")
args = parser.parse_args()
if args.site == "reddit":
if args.start_from and not args.start_from.startswith("t3_"):
args.start_from = "t3_" + args.start_from
return args
def build_site_subparsers(parser: ArgumentParser):
site_subparsers = parser.add_subparsers(title='Sites',
description="Choose from which site you'd like to download images",
help='supported sites',
dest="site")
site_subparsers.required = True
reddit_subparser(site_subparsers)
url_parser(site_subparsers)
def reddit_subparser(site_subparsers):
reddit_argparser = site_subparsers.add_parser('reddit')
reddit_modes = reddit_argparser.add_subparsers(title='Reddit Modes',
description="Choose if you'd like to manipulate a subreddit "
"or a user's posts",
dest="reddit_mode",
help='reddit modes')
# General options for Reddit
reddit_argparser.add_argument('--limit', '-l',
metavar='N',
type=int,
default=25,
help="Maximum URL limit per subreddit.")
reddit_argparser.add_argument("--imgur-html", "-ih",
dest="should_parse_imgur_html",
action="store_true",
help="Forces the use of the deprecated Imgur HTML parser "
"instead of the Imgur API Parser.")
reddit_argparser.add_argument('--page-limit', '-pl',
| dest="page_limit",
metavar='N',
type=int,
default=4,
help="Maximum amount of pages to get.")
reddit_argparser.add_argument('--start-from', '-sf',
| dest="start_from",
metavar='ID',
help="Post ID from which to get a listing.")
# Subreddit mode
subreddit_mode = reddit_modes.add_parser("subreddit",
description="Manipulate subreddits posts",
help='subreddit options')
subreddit_mode.add_argument('subreddit',
nargs='+',
metavar="SUBREDDITS",
help="List of the subreddits.")
subreddit_mode.add_argument('--period', '-p',
default='week',
choices=['hour', 'day', 'week', 'month', 'year', 'all'],
help="[h]our, [d]ay, [w]eek, [m]onth, [y]ear, or [a]ll. Period "
"of time from which you want images. Only works for top and controversial")
subreddit_mode.add_argument('--type', '-t',
default='hot',
choices=['hot', 'top', 'new', 'controversial'],
help="[hot], [top], [new], [controversial]. Type of listing of posts "
"in a subreddit.")
# User mode
user_mode = reddit_modes.add_parser("user",
description="Manipulate a user's posts",
help='user options')
user_mode.add_argument("user",
help="Specifies the user name.")
user_mode.add_argument("password",
default="",
help="Specifies the user name.")
user_mode.add_argument("--upvoted",
dest="should_get_upvoted",
action="store_true",
help="Specifies if the upvoted posts of a user should be fetched. Otherwise, "
"get the saved ones.")
def url_parser(site_subparsers):
url_argparser = site_subparsers.add_parser('url')
url_argparser.add_argument('url',
metavar="URL",
help="The URL from which to download images.")
|
zhaopu7/models | nmt_without_attention/nmt_without_attention.py | Python | apache-2.0 | 8,713 | 0.000574 | #!/usr/bin/env python
import sys
import gzip
import paddle.v2 as paddle
### Parameters
word_vector_dim = 620
latent_chain_dim = 1000
beam_size = 5
max_length = 50
def seq2seq_net(source_dict_dim, target_dict_dim, generating=False):
'''
Define the network structure of NMT, including encoder and decoder.
:param source_dict_dim: size of source dictionary
:type source_dict_dim : int
:param target_dict_dim: size of target dictionary
:type target_dict_dim: int
'''
decoder_size = encoder_size = latent_chain_dim
#### Encoder
src_word_id = paddle.layer.data(
name='source_language_word',
type=paddle.data_type.integer_value_sequence(source_dict_dim))
src_embedding = paddle.layer.embedding(
input=src_word_id, size=word_vector_dim)
# use bidirectional_gru
encoded_vector = paddle.networks.bidirectional_gru(
input=src_embedding,
size=encoder_size,
fwd_act=paddle.activation.Tanh(),
fwd_gate_act=paddle.activation.Sigmoid(),
bwd_act=paddle.activation.Tanh(),
bwd_gate_act=paddle.activation.Sigmoid(),
return_seq=True)
#### Decoder
encoder_last = paddle.layer.last_seq(input=encoded_vector)
encoder_last_projected = paddle.layer.mixed(
size=decoder_size,
act=paddle.activation.Tanh(),
input=paddle.layer.full_matrix_projection(input=encoder_last))
# gru step
def gru_decoder_without_attention(enc_vec, current_word):
'''
Step function for gru decoder
:param enc_vec: encoded vector of source language
:type enc_vec: layer object
:param current_word: current input of decoder
:type current_word: layer object
'''
decoder_mem = paddle.layer.memory(
name='gru_decoder',
size=decoder_size,
boot_layer=encoder_last_projected)
context = paddle.layer.last_seq(input=enc_vec)
decoder_inputs = paddle.layer.mixed(
size=decoder_size * 3,
input=[
paddle.layer.full_matrix_projection(input=context),
paddle.layer.full_matrix_projection(input=current_word)
])
gru_step = paddle.layer.gru_step(
name='gru_decoder',
act=paddle.activation.Tanh(),
gate_act=paddle.activation.Sigmoid(),
input=decoder_inputs,
output_mem=decoder_mem,
size=decoder_size)
out = paddle.layer.mixed(
size=target_dict_dim,
bias_attr=True,
act=paddle.activation.Softmax(),
input=paddle.layer.full_matrix_projection(input=gru_step))
return out
decoder_group_name = "decoder_group"
group_input1 = paddle.layer.StaticInput(input=encoded_vector, is_seq=True)
group_inputs = [group_input1]
if not generating:
trg_embedding = paddle.layer.embedding(
input=paddle.layer.data(
name='target_language_word',
type=paddle.data_type.integer_value_sequence(target_dict_dim)),
size=word_vector_dim,
param_attr=paddle.attr.ParamAttr(name='_target_language_embedding'))
group_inputs.append(trg_embedding)
decoder = paddle.layer.recurrent_group(
name=decoder_group_name,
step=gru_decoder_without_attention,
input=group_inputs)
        lbl = paddle.layer.data(
name='target_language_next_word',
type=paddle.data_type.integer_value_sequence(target_dict_dim))
cost = paddle.layer.classification_cost(input=decoder, label=lbl)
return cost
else:
trg_embedding = paddle.layer.GeneratedInput(
size=target_dict_dim,
embedding_name='_target_language_embedding',
embedding_size=word_vector_dim)
group_inputs.append(trg_embedding)
        beam_gen = paddle.layer.beam_search(
name=decoder_group_name,
step=gru_decoder_without_attention,
input=group_inputs,
bos_id=0,
eos_id=1,
beam_size=beam_size,
max_length=max_length)
return beam_gen
def train(source_dict_dim, target_dict_dim):
'''
Training function for NMT
:param source_dict_dim: size of source dictionary
:type source_dict_dim: int
:param target_dict_dim: size of target dictionary
:type target_dict_dim: int
'''
# initialize model
cost = seq2seq_net(source_dict_dim, target_dict_dim)
parameters = paddle.parameters.create(cost)
# define optimize method and trainer
optimizer = paddle.optimizer.RMSProp(
learning_rate=1e-3,
gradient_clipping_threshold=10.0,
regularization=paddle.optimizer.L2Regularization(rate=8e-4))
trainer = paddle.trainer.SGD(
cost=cost, parameters=parameters, update_equation=optimizer)
# define data reader
wmt14_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.wmt14.train(source_dict_dim), buf_size=8192),
batch_size=55)
# define event_handler callback
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0 and event.batch_id > 0:
with gzip.open('models/nmt_without_att_params_batch_%d.tar.gz' %
event.batch_id, 'w') as f:
parameters.to_tar(f)
if event.batch_id % 10 == 0:
print "\nPass %d, Batch %d, Cost %f, %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics)
else:
sys.stdout.write('.')
sys.stdout.flush()
# start to train
trainer.train(
reader=wmt14_reader, event_handler=event_handler, num_passes=2)
def generate(source_dict_dim, target_dict_dim, init_models_path):
'''
Generating function for NMT
:param source_dict_dim: size of source dictionary
:type source_dict_dim: int
:param target_dict_dim: size of target dictionary
:type target_dict_dim: int
:param init_models_path: path for inital model
:type init_models_path: string
'''
# load data samples for generation
gen_creator = paddle.dataset.wmt14.gen(source_dict_dim)
gen_data = []
for item in gen_creator():
gen_data.append((item[0], ))
beam_gen = seq2seq_net(source_dict_dim, target_dict_dim, True)
with gzip.open(init_models_path) as f:
parameters = paddle.parameters.Parameters.from_tar(f)
# prob is the prediction probabilities, and id is the prediction word.
beam_result = paddle.infer(
output_layer=beam_gen,
parameters=parameters,
input=gen_data,
field=['prob', 'id'])
# get the dictionary
src_dict, trg_dict = paddle.dataset.wmt14.get_dict(source_dict_dim)
    # sequences in the flat id stream are separated by -1, and the first
    # element of each generated sequence is the sequence length
seq_list, seq = [], []
for w in beam_result[1]:
if w != -1:
seq.append(w)
else:
seq_list.append(' '.join([trg_dict.get(w) for w in seq[1:]]))
seq = []
prob = beam_result[0]
for i in xrange(len(gen_data)):
print "\n*******************************************************\n"
print "src:", ' '.join([src_dict.get(w) for w in gen_data[i][0]]), "\n"
for j in xrange(beam_size):
print "prob = %f:" % (prob[i][j]), seq_list[i * beam_size + j]
def usage_helper():
print "Please specify training/generating phase!"
print "Usage: python nmt_without_attention_v2.py --train/generate"
exit(1)
def main():
if not (len(sys.argv) == 2):
usage_helper()
if sys.argv[1] == '--train':
generating = False
elif sys.argv[1] == '--generate':
generating = True
else:
usage_helper()
# initialize paddle
paddle.init(use_gpu=False, trainer_count=1)
source_language_dict_dim = 30000
target_language_dict_dim = 30000
if generating:
# modify this p |
willicab/instalador | instalador/clases/particiones.py | Python | gpl-2.0 | 15,469 | 0.001685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# =============================================================================
# PAQUETE: instalador
# ARCHIVO: instalador/config.py
# COPYRIGHT:
# (C) 2012 William Abrahan Cabrera Reyes <william@linux.es>
# (C) 2012 Erick Manuel Birbe Salazar <erickcion@gmail.com>
# (C) 2012 Luis Alejandro Martínez Faneyth <luis@huntingbears.com.ve>
# LICENCIA: GPL-2
# =============================================================================
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from instalador.clases.common import ProcessGenerator, espacio_usado
from instalador.config import FSPROGS
import _ped
import parted
class Particiones():
def __init__(self):
pass
def nueva_tabla_particiones(self, drive, t):
dev = parted.Device(drive)
new = parted.freshDisk(dev, t)
while True:
if not dev.busy:
break
try:
new.commit()
except _ped.IOException, x:
print x
def lista_discos(self):
'''
        Return the disks connected to this machine.
'''
l = []
dev = parted.getAllDevices()
for d in dev:
if not d.readOnly:
l.append(d.path)
return sorted(l)
def lista_particiones(self, disco):
'''
        Build a list of the partitions available on a given disk.
'''
l = []
p = []
try:
dev = parted.Device(disco)
disk = parted.Disk(dev)
except _ped.DiskLabelException:
return p
sectorsize = dev.sectorSize
total = float(dev.getSize(unit='KB'))
for j in disk.partitions:
l.append(j)
for w in disk.getFreeSpacePartitions():
l.append(w)
for i in l:
code = i.type
part = i.path
ini = float(i.geometry.start * sectorsize / 1024.0)
fin = float(i.geometry.end * sectorsize / 1024.0)
tam = float(i.geometry.length * sectorsize / 1024.0)
num = int(i.number)
usado = tam
libre = float(0)
if num != -1:
if i.fileSystem != None:
if code != 2:
if i.fileSystem.type == 'linux-swap(v1)':
fs = 'swap'
usado = tam
libre = float(0)
else:
fs = i.fileSystem.type
usado = espacio_usado(fs, part)
libre = tam - usado
else:
if code == 2:
fs = 'extended'
else:
fs = 'unknown'
flags = i.getFlagsAsString().split(', ')
else:
fs = 'free'
libre = tam
usado = float(0)
flags = ''
if not flags:
flags = 'none'
if libre < 0 or usado == 'unknown':
libre = float(0)
usado = tam
if code == 0 or code == 4:
tipo = 'primary'
elif code == 1 or code == 5:
tipo = 'logical'
elif code == 2:
tipo = 'extended'
p.append(
[part, ini, fin, tam, fs, tipo, flags, usado, libre, total, num]
)
return sorted(p, key=lambda particiones: particiones[1])
def nombre_particion(self, drive, ptype, start, end):
dev = parted.Device(drive)
disk = parted.Disk(dev)
s_sec = start * 1024 / dev.sectorSize
e_sec = end * 1024 / dev.sectorSize
m_sec = ((e_sec - s_sec) / 2) + s_sec
if ptype == 'logical' or ptype == 'primary' or ptype == 1 or ptype == 0:
part = disk.getPartitionBySector(m_sec)
else:
part = disk.getExtendedPartition()
return part.path
def num_particion(self, drive, ptype, start, end):
dev = parted.Device(drive)
disk = parted.Disk(dev)
s_sec = start * 1024 / dev.sectorSize
e_sec = end * 1024 / dev.sectorSize
m_sec = ((e_sec - s_sec) / 2) + s_sec
if ptype == 'logical' or ptype == 'primary' or ptype == 1 or ptype == 0:
part = disk.getPartitionBySector(m_sec)
else:
part = disk.getExtendedPartition()
return part.number
def crear_particion(self, drive, start, end, fs, partype, format):
'''
        Arguments:
        - drive: the disk on which the partition will be created, e.g. /dev/sda
        - partype: the type of partition to create {primary, extended, logical}
        - fs: the filesystem for the partition {ext2, ext4, linux-swap, fat32, ntfs}
        - start: where the partition will begin, in kB
        - end: where the partition will end, in kB
        - format: whether to create the filesystem on the new partition
'''
i = 0
j = 0
k = 0
dev = parted.Device(drive)
disk = parted.Disk(dev)
s_sec = start * 1024 / dev.sectorSize
e_sec = end * 1024 / dev.sectorSize
if partype == 'primary' or partype == 0:
pedtype = _ped.PARTITION_NORMAL
elif partype == 'logical' or partype == 1:
pedtype = _ped.PARTITION_LOGICAL
elif partype == 'extended' or partype == 2:
pedtype = _ped.PARTITION_EXTENDED
else:
return False
while True:
if not dev.busy:
break
try:
geometry = parted.Geometry(device=dev, start=s_sec, end=e_sec)
i += 1
except Exception, a:
print a
try:
constraint = parted.Constraint(exactGeom=geometry)
i += 1
except Exception, b:
print b
try:
partition = parted.Partition(disk=disk, type=pedtype, geometry=geometry)
i += 1
except Exception, c:
print c
try:
disk.addPartition(partition=partition, constraint=constraint)
i += 1
except Exception, d:
print d
|
try:
disk.commit()
i += 1
except Exception, e:
print e
if i == 5:
if pedtype == _ped.PARTITION_EXTENDED:
return True
else:
if fs in FSPROGS:
fo | r pid in FSPROGS[fs][3]:
pnum = self.num_particion(drive, partype, start, end)
if ProcessGenerator(pid.format(drive, pnum)).returncode == 0:
k += 1
if k == len(FSPROGS[fs][3]):
if format:
for mkfs in FSPROGS[fs][0]:
pname = self.nombre_particion(drive, partype, start, end)
if ProcessGenerator(mkfs.format(pname)).returncode == 0:
j += 1
if j == len(FSPROGS[fs][0]):
return True
else:
return False
else:
return True
else:
return False
else:
return False
el |
NeurodataWithoutBorders/api-python | nwb/check_schema.py | Python | bsd-3-clause | 2,669 | 0.010116 | # script to validate h5gate schema files using json schema
import os.path
import sys
import json
import jsonschema
import ast
def load_schema(file_name):
""" Load Python file that contains JSON formatted as a Python dictionary.
Files in this format are used to store the schema because, unlike pure JSON,
they allow comments and formatting long strings to make them easier to read."""
if not os.path.isfile(file_name):
print ("Unable to locate file %s" % file_name)
sys.exit(1)
f = open(file_name)
    file_contents = f.read()
f.close()
try:
        # use ast.literal_eval to parse
pydict = ast.literal_eval(file_contents)
except Exception as e:
print ("** Unable to parse file '%s' (should be mostly JSON)" % file_name)
print ("Error is: %s" % e)
sys.exit(1)
assert isinstance(pydict, dict), "** File '%s does not contain python dictionary" % file_name
return pydict
def load_meta_schema():
meta_schema_file = "meta_schema.json"
if not os.path.isfile(meta_schema_file):
print ("Unable to locate file %s" % file_name)
sys.exit(1)
with file(meta_schema_file) as f:
file_contents = f.read()
meta_schema = json.loads(file_contents)
return meta_schema
if __name__ == "__main__":
nwb_dir = os.path.dirname(os.path.realpath(__file__))
meta_schema_file = os.path.join(nwb_dir, "meta_schema.py")
fs_var = "fs"
if len(sys.argv) != 2:
print ("format is:")
print ("python %s <specification_file>" % sys.argv[0])
print ("where <specification_file> is either the name of a schema file, or '-' for the")
print ("default core specification (nwb_core.py), or 'N' - to process the core specification")
print ("but not display the full path to it (so output will not depend on the location")
print ("NWB is installed in).")
sys.exit(0)
using_core = sys.argv[1] in ("-", "N")
schema_file = os.path.join(nwb_dir, 'nwb_core.py') if using_core else sys.argv[1]
filter_path = sys.argv[1] == "N"
meta_schema = load_schema(meta_schema_file)
schema = load_schema(schema_file)
if fs_var not in schema:
print ("** Error, key '%s' not defined in top level of file '%s'" % (fs_var, schema_file))
sys.exit(1)
schema = schema[fs_var]
display_name = ".../nwb/nwb_core.py" if filter_path else schema_file
print ("checking specification in file '%s'" % display_name)
jsonschema.validate(schema, meta_schema)
print ("File is valid.")
|
openhdf/enigma2-wetek | keyids.py | Python | gpl-2.0 | 5,381 | 0.057053 | KEYIDS = {
"KEY_RESERVED": 0,
"KEY_ESC": 1,
"KEY_1": 2,
"KEY_2": 3,
"KEY_3": 4,
"KEY_4": 5,
"KEY_5": 6,
"KEY_6": 7,
"KEY_7": 8,
"KEY_8": 9,
"KEY_9": 10,
"KEY_0": 11,
"KEY_MINUS": 12,
"KEY_EQUAL": 13,
"KEY_BACKSPACE": 14,
"KEY_TAB": 15,
"KEY_Q": 16,
"KEY_W": 17,
"KEY_E": 18,
"KEY_R": 19,
"KEY_T": 20,
"KEY_Y": 21,
"KEY_U": 22,
"KEY_I": 23,
"KEY_O": 24,
"KEY_P": 25,
"KEY_LEFTBRACE": 26,
"KEY_RIGHTBRACE": 27,
"KEY_ENTER": 28,
"KEY_LEFTCTRL": 29,
"KEY_A": 30,
"KEY_S": 31,
"KEY_D": 32,
"KEY_F": 33,
"KEY_G": 34,
"KEY_H": 35,
"KEY_J": 36,
"KEY_K": 37,
"KEY_L": 38,
"KEY_SEMICOLON": 39,
"KEY_APOSTROPHE": 40,
"KEY_GRAVE": 41,
"KEY_LEFTSHIFT": 42,
"KEY_BACKSLASH": 43,
"KEY_Z": 44,
"KEY_X": 45,
"KEY_C": 46,
"KEY_V": 47,
"KEY_B": 48,
"KEY_N": 49,
"KEY_M": 50,
"KEY_COMMA": 51,
"KEY_DOT": 52,
"KEY_SLASH": 53,
"KEY_RIGHTSHIFT": 54,
"KEY_KPASTERISK": 55,
"KEY_LEFTALT": 56,
"KEY_SPACE": 57,
"KEY_CAPSLOCK": 58,
"KEY_F1": 59,
"KEY_F2": 60,
"KEY_F3": 61,
"KEY_F4": 62,
"KEY_F5": 63,
"KEY_F6": 64,
"KEY_F7": 65,
"KEY_F8": 66,
"KEY_F9": 67,
"KEY_F10": 68,
"KEY_NUMLOCK": 69,
"KEY_SCROLLLOCK": 70,
"KEY_KP7": 71,
"KEY_KP8": 72,
"KEY_KP9": 73,
"KEY_KPMINUS": 74,
"KEY_KP4": 75,
"KEY_KP5": 76,
"KEY_KP6": 77,
"KEY_KPPLUS": 78,
"KEY_KP1": 79,
"KEY_KP2": 80,
"KEY_KP3": 81,
"KEY_KP0": 82,
"KEY_KPDOT": 83,
"KEY_103RD": 84,
"KEY_F13": 85,
"KEY_102ND": 86,
"KEY_F11": 87,
"KEY_F12": 88,
"KEY_F14": 89,
"KEY_F15": 90,
"KEY_F16": 91,
"KEY_F17": 92,
"KEY_F18": 93,
"KEY_F19": 94,
"KEY_F20": 95,
"KEY_KPENTER": 96,
"KEY_RIGHTCTRL": 97,
"KEY_KPSLASH": 98,
"KEY_SYSRQ": 99,
"KEY_RIGHTALT": 100,
"KEY_LINEFEED": 101,
"KEY_HOME": 102,
"KEY_UP": 103,
"KEY_PAGEUP": 104,
"KEY_LEFT": 105,
"KEY_RIGHT": 106,
"KEY_END": 107,
"KEY_DOWN": 108,
"KEY_PAGEDOWN": 109,
"KEY_INSERT": 110,
"KEY_DELETE": 111,
"KEY_MACRO": 112,
"KEY_MUTE": 113,
"KEY_VOLUMEDOWN": 114,
"KEY_VOLUMEUP": 115,
"KEY_POWER": 116,
"KEY_KPEQUAL": 117,
"KEY_KPPLUSMINUS": 118,
"KEY_PAUSE": 119,
"KEY_F21": 120,
"KEY_F22": 121,
"KEY_F23": 122,
"KEY_F24": 123,
"KEY_KPCOMMA": 124,
"KEY_LEFTMETA": 125,
"KEY_RIGHTMETA": 126,
"KEY_COMPOSE": 127,
"KEY_STOP": 128,
"KEY_AGAIN": 129,
"KEY_PROPS": 130,
"KEY_UNDO": 131,
"KEY_FRONT": 132,
"KEY_COPY": 133,
"KEY_OPEN": 134,
"KEY_PASTE": 135,
"KEY_FIND": 136,
"KEY_CUT": 137,
"KEY_HELP": 138,
"KEY_MENU": 139,
"KEY_CALC": 140,
"KEY_SETUP": 141,
"KEY_SLEEP": 142,
"KEY_WAKEUP": 143,
"KEY_FILE": 144,
"KEY_SENDFILE": 145,
"KEY_DELETEFILE": 146,
"KEY_XFER": 147,
"KEY_PROG1": 148,
"KEY_PROG2": 149,
"KEY_WWW": 150,
"KEY_MSDOS": 151,
"KEY_COFFEE": 152,
"KEY_DIRECTION": 153,
"KEY_CYCLEWINDOWS": 154,
"KEY_MAIL": 155,
"KEY_BOOKMARKS": 156,
"KEY_COMPUTER": 157,
"KEY_BACK": 158,
"KEY_FORWARD": 159,
"KEY_CLOSECD": 160,
"KEY_EJECTCD": 161,
"KEY_EJECTCLOSECD": 162,
"KEY_NEXTSONG": 163,
"KEY_PLAYPAUSE": 164,
"KEY_PREVIOUSSONG": 165,
"KEY_STOPCD": 166,
"KEY_RECORD": 167,
"KEY_REWIND": 168,
"KEY_PHONE": 169,
"KEY_ISO": 170,
"KEY_CONFIG": 171,
"KEY_HOMEPAGE": 172,
"KEY_REFRESH": 173,
"KEY_EXIT": 174,
"KEY_MOVE": 175,
"KEY_EDIT": 176,
"KEY_SCROLLUP": 177,
"KEY_SCROLLDOWN": 178,
"KEY_KPLEFTPAREN": 179,
"KEY_KPRIGHTPAREN": 180,
"KEY_INTL1": 181,
"KEY_INTL2": 182,
"KEY_INTL3": 183,
"KEY_INTL4": 184,
"KEY_INTL5": 185,
"KEY_INTL6": 186,
"KEY_INTL7": 187,
"KEY_INTL8": 188,
"KEY_INTL9": 189,
"KEY_LANG1": 190,
"KEY_LANG2": 191,
"KEY_LANG3": 192,
"KEY_LANG4": 193,
"KEY_LANG5": 194,
"KEY_LANG6": 195,
"KEY_LANG7": 196,
"KEY_LANG8": 197,
"KEY_LANG9": 198,
"KEY_PLAYCD": 200,
"KEY_PAUSECD": 201,
"KEY_PROG3": 202,
"KEY_PROG4": 203,
"KEY_SUSPEND": 205,
"KEY_CLOSE": 206,
"KEY_PLAY": 207,
"KEY_FASTFORWARD": 208,
"KEY_BASSBOOST": 209,
"KEY_PRINT": 210,
"KEY_HP": 211,
"KEY_CAMERA": 212,
"KEY_SOUND": 213,
"KEY_QUESTION": 214,
"KEY_EMAIL": 215,
"KEY_CHAT": 216,
"KEY_SEARCH": 217,
"KEY_CONNECT": 218,
"KEY_FINANCE": 219,
"KEY_SPORT": 220,
"KEY_SHOP": 221,
"KEY_ALTERASE": 222,
"KEY_CANCEL": 223,
"KEY_BRIGHTNESSDOWN": 224,
"KEY_BRIGHTNESSUP": 225,
"KEY_MEDIA": 226,
"KEY_VMODE": 227,
"KEY_LAN": 238,
"KEY_UNKNOWN": 240,
"BtnA": 304,
"BtnY": 308,
"BtnB": 305,
"BtnX": 307,
"BtnTL": 310,
"BtnTR": 311,
"KEY_OK": 352,
"KEY_SELECT": 353,
"KEY_GOTO": 354,
"KEY_CLEAR": 355,
"KEY_POWER2": 356,
"KEY_OPTION": 357,
"KEY_INFO": 358,
"KEY_TIME": 359,
"KEY_VENDOR": 360,
"KEY_ARCHIVE": 361,
"KEY_PROGRAM": 362,
"KEY_CHANNEL": 363,
"KEY_FAVORITES | ": 364,
"KEY_EPG": 365,
"KEY_PVR": 366,
"KEY_MHP": 367,
"KEY_LANGUAGE": 368,
"KEY_TITLE": 369,
"KEY_SUBTITLE": 370,
"KEY_ANGLE": 371,
"KEY_ZOOM": 372,
"KEY_MODE": 373,
"KEY_KEYBOARD": 374,
"KEY_SCREEN": 375,
"KEY_PC": 376,
"KEY_TV": 377,
"KEY_TV2": 378,
"KEY_VCR": 379,
"KEY_VCR2": 380,
"KEY_SAT": 381,
"KEY_SAT2": 382,
"KEY_CD": 383,
"KEY_TAPE": 384,
"KEY_RADIO": 385,
"KEY_TUNER": 386,
"KEY_PLAYER": 387,
"KEY_TEXT": 388,
"KEY_DVD": 389,
"KEY_AUX": 390,
"KEY_MP3": 391,
"KEY_AUDIO": 392,
| "KEY_VIDEO": 393,
"KEY_DIRECTORY": 394,
"KEY_LIST": 395,
"KEY_MEMO": 396,
"KEY_CALENDAR": 397,
"KEY_RED": 398,
"KEY_GREEN": 399,
"KEY_YELLOW": 400,
"KEY_BLUE": 401,
"KEY_CHANNELUP": 402,
"KEY_CHANNELDOWN": 403,
"KEY_FIRST": 404,
"KEY_LAST": 405,
"KEY_AB": 406,
"KEY_NEXT": 407,
"KEY_RESTART": 408,
"KEY_SLOW": 409,
"KEY_SHUFFLE": 410,
"KEY_BREAK": 411,
"KEY_PREVIOUS": 412,
"KEY_DIGITS": 413,
"KEY_TEEN": 414,
"KEY_TWEN": 415,
"KEY_CONTEXT_MENU": 438,
"KEY_DEL_EOL": 448,
"KEY_DEL_EOS": 449,
"KEY_INS_LINE": 450,
"KEY_DEL_LINE": 451,
"KEY_ASCII": 510,
"KEY_MAX": 511,
"BTN_0": 256,
"BTN_1": 257,
}
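# Illustrative helper (not part of the original file): invert the mapping
# for code-to-name lookups; a duplicate code, if any, keeps the last name.
KEYNAMES = dict((code, name) for name, code in KEYIDS.items())
# KEYNAMES[352] -> "KEY_OK"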
|
vaibhav-singh/django-travis-setup | django_travis_setup/example/apps.py | Python | mit | 154 | 0 | # | -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ExampleConfig(AppConfig):
name = 'exa | mple'
|
DaanHoogland/cloudstack | systemvm/debian/opt/cloud/bin/cs_forwardingrules.py | Python | apache-2.0 | 3,239 | 0.000617 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writin | g,
# software distributed under the License i | s distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def merge(dbag, rules):
for rule in rules["rules"]:
source_ip = rule["source_ip_address"]
destination_ip = rule["destination_ip_address"]
revoke = rule["revoke"]
newrule = dict()
newrule["public_ip"] = source_ip
newrule["internal_ip"] = destination_ip
if rules["type"] == "staticnatrules":
newrule["type"] = "staticnat"
elif rules["type"] == "forwardrules":
newrule["type"] = "forward"
newrule["public_ports"] = rule["source_port_range"]
newrule["internal_ports"] = rule["destination_port_range"]
newrule["protocol"] = rule["protocol"]
if not revoke:
if rules["type"] == "staticnatrules":
dbag[source_ip] = [newrule]
elif rules["type"] == "forwardrules":
index = -1
if source_ip in dbag.keys():
for forward in dbag[source_ip]:
if ruleCompare(forward, newrule):
index = dbag[source_ip].index(forward)
if not index == -1:
dbag[source_ip][index] = newrule
else:
dbag[source_ip].append(newrule)
else:
dbag[source_ip] = [newrule]
else:
if rules["type"] == "staticnatrules":
if source_ip in dbag.keys():
del dbag[source_ip]
elif rules["type"] == "forwardrules":
if source_ip in dbag.keys():
index = -1
for forward in dbag[source_ip]:
if ruleCompare(forward, newrule):
index = dbag[source_ip].index(forward)
print "removing index %s" % str(index)
if not index == -1:
del dbag[source_ip][index]
return dbag
# The compare function checks only the public side; the public fields must be equal, while the internal details may change
def ruleCompare(ruleA, ruleB):
if not ruleA["type"] == ruleB["type"]:
return False
if ruleA["type"] == "staticnat":
return ruleA["public_ip"] == ruleB["public_ip"]
elif ruleA["type"] == "forward":
return ruleA["public_ip"] == ruleB["public_ip"] and ruleA["public_ports"] == ruleB["public_ports"] \
and ruleA["protocol"] == ruleB["protocol"]
|
Fale/ansible | lib/ansible/modules/debug.py | Python | gpl-3.0 | 2,420 | 0.003719 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: debug
short_description: Print statements during execution
description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook.
- Useful for debugging together with the 'when:' directive.
- This module is also supported for Windows targets.
version_added: '0.8'
options:
msg:
description:
- The customized message that is printed. If omitted, prints a generic message.
type: str
default: 'Hello world!'
var:
description:
- A variable name to debug.
- Mutually exclusive with the C(msg) option.
- Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping,
so you should not be using Jinja2 delimiters unless you are looking for double interpolation.
type: str
verbosity:
description:
      - A number that controls when the debug is run; if you set it to 3, the debug will only run when -vvv or above is given.
type: int
default: 0
version_added: '2.1'
notes:
- This module is also supported for Windows targets.
seealso:
- module: ansible.builtin.assert
- module: ansible.builtin.fail
author:
- Dag Wieers (@dagwieers)
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Print the gateway for each host when defined
ansible.builtin.debug:
msg: System {{ inventory_h | ostname }} has gateway {{ ansible_default_ipv4.gateway }}
when: ansible_default_ipv4.gateway is defined
- name: Get uptime information
ansible.builtin.shell: /usr/bin/uptime
register: result
- name: Print return information from the previous task
ansible.builtin.debug:
var: result
verbosity: 2
- name: Display all variables/facts known for a host
ansible.builtin.debug:
var: hostvars[inventory_hostname]
verbosity: 4
- name: Prints t | wo lines of messages, but only if there is an environment value set
ansible.builtin.debug:
msg:
- "Provisioning based on YOUR_KEY which is: {{ lookup('env', 'YOUR_KEY') }}"
- "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
'''
|
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/brain/brain_subprocess.py | Python | apache-2.0 | 3,314 | 0.001509 | # Copyright (c) 2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import textwrap
import six
import astroid
PY33 = sys.version_info >= (3, 3)
PY36 = sys.version_info >= (3, 6)
def _subprocess_transform():
if six.PY3:
communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
communicate_signature = 'def communicate(self, input=None, timeout=None)'
if PY36:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), *,
encoding=None, errors=None):
pass
"""
else:
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=()):
pass
"""
else:
communicate = ('string', 'string')
communicate_signature = 'def communicate(self, input=None)'
init = """
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
pass
"""
if PY33:
wait_signature = 'def wait(self, timeout=None)'
else:
wait_signature = 'def wait(self)'
if six.PY3:
ctx_manager = '''
def __enter__(self): return self
def __exit__(self, *args): pass
'''
else:
ctx_manager = ''
c | ode = textwrap.dedent('''
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
%(communicate_signature)s:
return %(communicate)r
%(wait_signature)s:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
| def terminate(self):
pass
def kill(self):
pass
%(ctx_manager)s
''' % {'communicate': communicate,
'communicate_signature': communicate_signature,
'wait_signature': wait_signature,
'ctx_manager': ctx_manager})
init_lines = textwrap.dedent(init).splitlines()
indented_init = '\n'.join([' ' * 4 + line for line in init_lines])
code += indented_init
return astroid.parse(code)
astroid.register_module_extender(astroid.MANAGER, 'subprocess', _subprocess_transform)
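# Illustrative check (not in the original module): once the extender is
# registered, astroid infers the stubbed Popen from the transform above:
# node = astroid.extract_node("import subprocess; subprocess.Popen")
# inferred = next(node.infer())  # -> the ClassDef built by the transform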
|
ryfeus/lambda-packs | pytorch/source/caffe2/python/layers/last_n_window_collector.py | Python | mit | 2,543 | 0.000393 | ## @package last_n_window_collector
# Module caffe2.python.layers.last_n_window_collector
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class LastNWindowCollector(ModelLayer):
"""
Collect last-N samples from input record. If you have complex data,
| use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
| name='last_n_window_collector', **kwargs):
super(LastNWindowCollector, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
self.last_n = self.create_param(param_name='last_n',
shape=[0],
initializer=('ConstantFill', {}),
optimizer=model.NoOptim)
self.next_blob = self.create_param(
param_name='next',
shape=[],
initializer=('ConstantFill',
{'value': 0, 'dtype': core.DataType.INT32}),
optimizer=model.NoOptim
)
self.mutex = self.create_param(
param_name='mutex',
shape=None,
initializer=('CreateMutex',),
optimizer=model.NoOptim,
)
self.num_visited_blob = self.create_param(
param_name='num_visited',
shape=[],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.output_schema = schema.Struct(
(
'last_n',
schema.from_blob_list(input_record, [self.last_n])
),
('num_visited', schema.Scalar(blob=self.num_visited_blob)),
('mutex', schema.Scalar(blob=self.mutex)),
)
def add_ops(self, net):
net.LastNWindowCollector(
[self.last_n, self.next_blob, self.input_record(), self.mutex,
self.num_visited_blob],
[self.last_n, self.next_blob, self.num_visited_blob],
num_to_collect=self.num_to_collect,
)
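# Sketch of intended wiring (illustrative, not part of the module), assuming
# `model` is a layers model helper and `record` a schema.Scalar input:
# collector = LastNWindowCollector(model, record, num_to_collect=100)
# last_n_record = collector.output_schema.last_n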
|
deepmind/pysc2 | pysc2/lib/buffs.py | Python | apache-2.0 | 2,114 | 0.022706 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of buffs for SC2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
# pylint: disable=invalid-name
class Buffs(enum.IntEnum):
"""The list of buffs, as generated by bin/gen_data.py."""
BansheeCloak = 7
BlindingCloud = 8 | 3
BlindingCloudStructure = 38
CarryHarvestableVespeneGeyserGas = 273
CarryHarvestableVespeneGeyserGasProtoss = 274
CarryHarvestableVespeneGeyserGasZerg = 275
CarryHighYieldMineralFieldMinerals = 272
CarryMineralFieldMinerals = 271
ChannelSnipeCombat = 145
Charging = 30
ChronoBoostEnergyCost = 281
CloakFieldEffect = 29
C | ontaminated = 36
EMPDecloak = 16
FungalGrowth = 17
GhostCloak = 6
GhostHoldFire = 12
GhostHoldFireB = 13
GravitonBeam = 5
GuardianShield = 18
ImmortalOverload = 102
InhibitorZoneTemporalField = 289
LockOn = 116
LurkerHoldFire = 136
LurkerHoldFireB = 137
MedivacSpeedBoost = 89
NeuralParasite = 22
OracleRevelation = 49
OracleStasisTrapTarget = 129
OracleWeapon = 99
ParasiticBomb = 132
ParasiticBombSecondaryUnitSearch = 134
ParasiticBombUnitKU = 133
PowerUserWarpable = 8
PsiStorm = 28
QueenSpawnLarvaTimer = 11
RavenScramblerMissile = 277
RavenShredderMissileArmorReduction = 280
RavenShredderMissileTint = 279
Slow = 33
Stimpack = 27
StimpackMarauder = 24
SupplyDrop = 25
TemporalField = 121
ViperConsumeStructure = 59
VoidRaySpeedUpgrade = 288
VoidRaySwarmDamageBoost = 122
|
asimshankar/tensorflow | tensorflow/python/data/experimental/benchmarks/rejection_resample_benchmark.py | Python | apache-2.0 | 2,449 | 0.004492 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS | IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.rejection_resample()`."""
fro | m __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
def _time_resampling(
test_obj, data_np, target_dist, init_dist, num_to_sample):
dataset = dataset_ops.Dataset.from_tensor_slices(data_np).repeat()
# Reshape distribution via rejection sampling.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist,
seed=142))
get_next = dataset_ops.make_one_shot_iterator(dataset).get_next()
with test_obj.test_session() as sess:
start_time = time.time()
for _ in xrange(num_to_sample):
sess.run(get_next)
end_time = time.time()
return end_time - start_time
class RejectionResampleBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.rejection_resample()`."""
def benchmarkResamplePerformance(self):
init_dist = [0.25, 0.25, 0.25, 0.25]
target_dist = [0.0, 0.0, 0.0, 1.0]
num_classes = len(init_dist)
# We don't need many samples to test a dirac-delta target distribution
num_samples = 1000
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
resample_time = _time_resampling(
self, data_np, target_dist, init_dist, num_to_sample=1000)
self.report_benchmark(iters=1000, wall_time=resample_time, name="resample")
if __name__ == "__main__":
test.main()
|
nnabeyang/MY_jinja2 | MY_jinja2/visitor.py | Python | bsd-3-clause | 247 | 0.016194 | class NodeVisitor:
def visit(self, node, *args):
try:
retu | rn getattr(self, 'visit_' + node.__class__.__name__)(node, *args)
except AttributeError:
| pass
for child in node.iter_child_nodes():
self.visit(child, *args)
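# Illustrative subclass (not in the original file): the dispatcher above
# resolves a visit_<ClassName> method from the node's class name, so a
# hypothetical `Name` node class would be handled like this:
# class NameCounter(NodeVisitor):
#     def __init__(self):
#         self.count = 0
#     def visit_Name(self, node):
#         self.count += 1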
|
KlausPopp/Moddy | setup.py | Python | lgpl-3.0 | 1,397 | 0 | # created based on
# https://python-packaging.readthedocs.io/en | /latest/minimal.html
# But instead of python setup.py register sdist upload,
# use https://pypi.org/p/twine/
#
from setuptools import setup
import sys
import os
import re
sys.path.append("src")
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return "File '%s' not found.\n" % fname
def readVersion():
txt = read("src/moddy/version.py")
ver = re.fi | ndall(r"([0-9]+)", txt)
print("ver=%s" % ver)
return ver[0] + "." + ver[1] + "." + ver[2]
setup(
name="moddy",
install_requires=["svgwrite"],
version=readVersion(),
description="A discrete event simulator generating sequence diagrams",
long_description=read("README.rst"),
url="https://github.com/KlausPopp/Moddy",
project_urls={
"Documentation": "https://klauspopp.github.io/Moddy/",
"Source Code": "https://github.com/KlausPopp/Moddy/",
},
keywords="simulation modelling",
author="Klaus Popp",
author_email="klauspopp@gmx.de",
license="LGPL-3.0",
platforms="OS Independent",
package_dir={"": "src"},
packages=[
"moddy",
"moddy.seq_diag_interactive_viewer",
"moddy.lib",
"moddy.lib.net",
],
package_data={"moddy.seq_diag_interactive_viewer": ["*.css", "*.js"]},
)
|
MokaCreativeLLC/XNATSlicer | XNATSlicer/XnatSlicerLib/ui/FolderMaker.py | Python | bsd-3-clause | 18,465 | 0.006986 | __author__ = "Sunil Kumar (kumar.sunil.p@gmail.com)"
__copyright__ = "Copyright 2014, Washington University in St. Louis"
__credits__ = ["Sunil Kumar", "Steve Pieper", "Dan Marcus"]
__license__ = "XNAT Software License Agreement " + \
"(see: http://xnat.org/about/license.php)"
__version__ = "2.1.1"
__maintainer__ = "Rick Herrick"
__email__ = "herrickr@mir.wustl.edu"
__status__ = "Production"
# application
from __main__ import qt
from __main__ import slicer
# external
from Xnat import *
from MokaUtils import *
# module
from XnatSlicerUtils import *
from XnatSlicerGlobals import *
class FolderMaker(qt.QWidget):
"""
FolderMaker is used for creating new folders within XNAT at the projects,
subjects, experiments levels.
"""
FONT_NAME = 'Arial'
FONT_SIZE = 10
EVENT_TYPES = [
'folderAdded',
]
def __init__(self, parent, View):
"""
@param parent: Parent widget.
@type parent: qt.QWidget
@param View: The View module of XNATSlicer.
@type View: View
"""
#--------------------
# Call parent init.
#--------------------
super(FolderMaker, self).__init__(parent)
#--------------------
# Params
#--------------------
self.View = View
self.Events = MokaUtils.Events(self.EVENT_TYPES)
self.__xsiList = qt.QComboBox()
self.__xsiList.addItems([key for key, value in \
Xnat.xsi.DEFAULT_TYPES.iteritems()])
self.__levelLabels = {}
self.__lineEdits = {}
self.__errorLines = {}
self.__levelTracker = {}
self.__nextLevelList = []
#--------------------
# Inits
#--------------------
self.__initWindow()
self.__createWidgets()
self.__createButtons()
self.__setLayout();
def show(self):
"""
Inherits from qt.QWidget. Conducts some custom routines as well.
"""
try:
for key, widget in self.__lineEdits.iteritems():
widget.clear()
except:
pass
selectedXnatLevel = self.__getSelectedXnatLevel()
self.__trackRelevantNodes(selectedXnatLevel)
#--------------------
# Hide and show to bring back to front
#--------------------
self.hide()
qt.QWidget.show(self)
#--------------------
# Enable line edits
#--------------------
for level, item in self.__lineEdits.iteritems():
item.setEnabled(True)
#--------------------
# Prepopulate
#--------------------
self.__prepopulate_ByViewSelection(selectedXnatLevel)
def eventFilter(self, widget, event):
"""
        Inherits from qt.QWidget -- does not need to be called programmatically.
Event filter for line edit interaction.
Refer to: U{http://qt-project.org/doc/qt-4.8/qevent.html#Type-enum} for
more information.
@param widget: The widget calling the event.
@type widget: qt.QWidget
@param event: The QT event.
@type event: number
"""
for level, lineEdit in self.__lineEdits.iteritems():
# click or key release
| if (event.type() == 3 or event.type() == 7) and widget == lineEdit:
#print "CLICK!"
if widget.enabled:
self.__onLineEditTextChanged(level, lineEdit.text)
def __getSelectedXnatLevel(self):
"""
        Get the current XNAT level of the selected node in the viewer. If it
        is higher than 'experiments', we default to 'experiments'. If no node
        is selected, we assume 'projects'.
@return: The XNAT lev | el valid for adding a folder. (Usually between
'projects' and 'experiments')
@rtype: string
"""
selectedXnatLevel = ''
try:
selectedXnatLevel = self.View.getItemLevel()
if not selectedXnatLevel in self.xnatLevels:
selectedXnatLevel = self.xnatLevels[-1]
except Exception, e:
selectedXnatLevel = 'projects'
return selectedXnatLevel
def __generateUriFromLines(self):
"""
Generates an XNAT uri to create a folder based on the line edits.
@return: The generated XNAT uri.
@rtype: str
"""
# Construct URI based on XNAT rules.
xnatUri = ''
for level in self.xnatLevels:
lineText = XnatSlicerUtils.toPlainText(self.__lineEdits[level].text)
if len(lineText) > 0:
xnatUri += '/' + level + '/'
uriAdd = lineText
xnatUri += uriAdd
# Special case for experiments
if level == 'experiments':
xnatUri += '?xsiType=' + \
Xnat.xsi.DEFAULT_TYPES[self.__xsiList.currentText]
else:
break
return xnatUri
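    # Illustrative result (not from the original code): with "Proj1",
    # "Subj1" and "Exp1" typed into the line edits, the loop above yields
    # a URI of the form
    #   /projects/Proj1/subjects/Subj1/experiments/Exp1?xsiType=<xsi type>
    # where <xsi type> comes from the xsi list combo box.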
def __clearErrorLines(self):
"""
Clears the error lines.
"""
#MokaUtils.debug.lf()
for key, errorLine in self.__errorLines.iteritems():
errorLine.setText('')
def __onAddButtonClicked(self):
"""
Callback when the add button is clicked.
"""
self.close()
self.__clearErrorLines()
xnatUri = self.__generateUriFromLines()
#MokaUtils.debug.lf(xnatUri)
self.Events.runEventCallbacks('folderAdded', xnatUri)
def __onButtonClicked(self, button):
"""
Callback if the create button is clicked. Communicates with
XNAT to create a folder. Details below.
@param button: The button that was clicked.
@type button: qt.QAbstractButton
"""
if 'add' in button.text.lower():
self.__onAddButtonClicked()
elif 'cancel' in button.text.lower():
self.close()
def __onLineEditTextChanged(self, level, text):
"""
Validates the line edit text for the folder to add:
- Checks for invalid characters.
- Checks if the name is already taken.
- Populates the line edits accordingly.
- Unpopulates the line edits accordingly.
@param level: The level of the pertaining lineEdit.
@type level: string
@param text: The text in the pertaining lineEdit.
@type text: string
"""
if hasattr(self, 'addButton'):
self.addButton.setEnabled(True)
if len(text.strip(" ")) > 0 and self.View.currentItem():
self.__prepopulate_byParentLines(level)
self.__unpopulateChildLines(level)
self.__errorLines[level].setText('')
self.__checkTextInvalid(level, text)
self.__checkTextInUse(level, text)
else:
if hasattr(self, 'addButton'):
self.addButton.setEnabled(False)
def __prepopulate_ByViewSelection(self, level):
"""
Prepopulates the lineEdits based on the selection in the View.
@param level: The level of the current lineEdit.
@type level: string
"""
# Only do this for non-projects
levelInd = self.xnatLevels.index(level)
if levelInd > 0:
levelInd -= 1
while levelInd > -1:
#MokaUtils.debug.lf(self.__levelTracker)
currLevel = self.xnatLevels[levelInd]
val = self.__levelTracker[currLevel][0]
self.__lineEdits[currLevel].setText(val)
self.__lineEdits[currLevel].setEnabled(False)
levelInd -= 1
def __prepopulate_byParentLines(self, level, item = None):
"""
Prepopulates the line edits based on the the parent lines of the
currently edited line edit.
@param level: The level of the current lineEdit.
@type level: string
"""
currNodeText = self.View.getItemName() if not item else \
sel |
amhokies/Timetable-Stalker | course_search.py | Python | mit | 2,048 | 0.000488 | from bs4 import BeautifulSoup
from models.course import Course
import requests
default_postdata = {
'CAMPUS': '0',
'TERMYEAR': '201709',
'CORE_CODE': 'AR%',
'subj_code': '',
'CRSE_NUMBER': '',
'crn': '',
'open_only': 'on',
'BTN_PRESSED': 'FIND class sections',
}
url = 'https://banweb.banner.vt.edu/ssb/prod/HZSKVTSC.P_ProcRequest'
def _get_open_courses(data):
req = requests.post(url, data=data)
soup = BeautifulSoup(req.content, 'html5lib')
rows = soup.select('table.dataentrytable tbody tr')
open_courses = list()
# The first row is the header row with the column labels
# If there's only one row, the rest of the table is empty, so there are no results
if len(rows) > 1:
rows = rows[1:]
for row in rows:
cells = row.select('td')
cells_text = list(map(lambda x: x.get_text(), cells))
crn = cells_text[0].strip( | )
label = cell | s_text[1].strip()
title = cells_text[2].strip()
professor = cells_text[6].strip()
open_courses.append(Course(crn, label, title, professor))
return open_courses
def get_open_courses_by_course(subj, num, semester):
""" Get the open courses that match the course subject and number passed in
:param subj: The subject abbreviation
:param num: The course number
:return: Returns a list of the open courses that are matched
"""
postdata = default_postdata.copy()
postdata['subj_code'] = subj.strip().upper()
postdata['CRSE_NUMBER'] = num.strip()
postdata['TERMYEAR'] = semester
return _get_open_courses(postdata)
def get_open_courses_by_crn(crn, semester):
""" Get the open course that matches the crn passed in
:param crn: The course request number of the course section
:return: Returns a list of the open courses that are matched
"""
postdata = default_postdata.copy()
postdata['crn'] = crn.strip()
postdata['TERMYEAR'] = semester
return _get_open_courses(postdata)
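# Example usage (illustrative; attribute names are assumed to mirror the
# Course(crn, label, title, professor) constructor, and the term code
# follows the "YYYYMM" pattern used in default_postdata above):
# for course in get_open_courses_by_course('CS', '2114', '201709'):
#     print(course.crn, course.title)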
|
marklescroart/bvp | bvp/utils/__init__.py | Python | bsd-2-clause | 165 | 0.012121 | """
Initialization of util | s.
A couple of handy-dandy functions:
"""
from __future__ import absolute_import
from | . import basics
from . import blender
from . import math |
balolam/university-mpi-python | practical_task_1/task2.py | Python | apache-2.0 | 516 | 0.001938 | from mpi4py | import MPI
from utils.mpi_helper import finalize
from utils.mpi_helper import init
init()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print "process [", rank, "] - running"
if rank == 0:
data = {'name': "PROCESS-" + str(rank), 'msg': "Hello"}
req = comm.isend(data, dest=1, tag=11)
req.wait()
elif rank == 1:
req = comm.irecv(source=0, tag=11)
data = req.wait()
print "process-" + str(rank) + " receive: | ", data['name'], "say", data['msg']
finalize()
|
huzq/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | Python | bsd-3-clause | 8,547 | 0.001989 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a
:class:`~sklearn.gaussian_process.GaussianProcessRegressor` with different
kernels. Mean, standard deviation, and 5 samples are shown for both prior
and posterior distributions.
Here, we only give some illustration. To know more about kernels' formulation,
refer to the :ref:`User Guide <gp_kernels>`.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD 3 clause
# %%
# Helper function
# ---------------
#
# Before presenting each individual kernel available for Gaussian processes,
# we will define a helper function that allows us to plot samples drawn from
# the Gaussian process.
#
# This function will take a
# :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model and will
# draw samples from the Gaussian process. If the model has not been fit, the
# samples are drawn from the prior distribution; after fitting, they are
# drawn from the posterior distribution.
import matplotlib.pyplot as plt
import numpy as np
def plot_gpr_samples(gpr_model, n_samples, ax):
"""Plot samples drawn from the Gaussian process model.
If the Gaussian process model is not trained then the drawn samples are
drawn from the prior distribution. Otherwise, the samples are drawn from
the posterior distribution. Be aware that a sample here corresponds to a
function.
Parameters
----------
gpr_model : `GaussianProcessRegressor`
A :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model.
n_samples : int
The number of samples to draw from the Gaussian process distribution.
ax : matplotlib axis
The matplotlib axis where to plot the samples.
"""
x = np.linspace(0, 5, 100)
X = x.reshape(-1, 1)
y_mean, y_std = gpr_model.predict(X, return_std=True)
y_samples = gpr_model.sample_y(X, n_samples)
for idx, single_prior in enumerate(y_samples.T):
ax.plot(
x,
single_prior,
linestyle="--",
alpha=0.7,
label=f"Sampled function #{idx + 1}",
)
ax.plot(x, y_mean, color="black", label="Mean")
ax.fill_between(
x,
y_mean - y_std,
y_mean + y_std,
alpha=0.1,
color="black",
label=r"$\pm$ 1 std. dev.",
)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_ylim([-3, 3])
# %%
# Dataset and Gaussian process generation
# ---------------------------------------
# We will create a training dataset that we will use in the different sections.
rng = np.random.RandomState(4)
X_train = rng.uniform(0, 5, 10).reshape(-1, 1)
y_train = np.sin((X_train[:, 0] - 2.5) ** 2)
n_samples = 5
# %%
# Kernel cookbook
# ---------------
#
# In this section, we illustrate some samples drawn from the prior and posterior
# distributions of the Gaussian process with different kernels.
#
# Radial Basis Function kernel
# ............................
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0))
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Radial Basis Function kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Rational Quadratic kernel
# ..........................
from sklearn.gaussian_process.kernels import RationalQuadratic
kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1, alpha_bounds=(1e-5, 1e15))
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Rational Quadratic kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Periodic kernel
# ...............
from sklearn.gaussian_process.kernels import ExpSineSquared
kernel = 1.0 * ExpSineSquared(
length_scale=1.0,
periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0),
)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(n | rows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y | _train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Periodic kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Dot product kernel
# ..................
from sklearn.gaussian_process.kernels import ConstantKernel, DotProduct
kernel = ConstantKernel(0.1, (0.01, 10.0)) * (
DotProduct(sigma_0=1.0, sigma_0_bounds=(0.1, 10.0)) ** 2
)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Dot product kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Matérn kernel
# ..............
from sklearn.gaussian_process.kernels import Matern
kernel = 1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0), nu=1.5)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].leg |
gallantlab/pycortex | cortex/dataset/dataset.py | Python | bsd-2-clause | 8,008 | 0.003746 | import tempfile
import numpy as np
import h5py
from ..database import db
from ..xfm import Transform
from .braindata import _hdf_write
from .views import normalize as _vnorm
from .views import Dataview, Volume, _from_hdf_data
class Dataset(object):
"""
Wrapper for multiple data objects. This often does not need to be used
explicitly--for example, if a dictionary of data objects is passed to
`cortex.webshow`, it will automatically be converted into a `Dataset`.
All kwargs should be `BrainData` or `Dataset` objects.
"""
def __init__(self, **kwargs):
self.h5 = None
self.views = {}
self.append(**kwargs)
def append(self, **kwargs):
"""Add the `BrainData` or `Dataset` objects in `kwargs` into this
dataset.
"""
for name, data in kwargs.items():
norm = normalize(data)
if isinstance(norm, Dataview):
self.views[name] = norm
elif isinstance(norm, Dataset):
self.views.update(norm.views)
else:
raise ValueError("Unknown input %s=%r"%(name, data))
return self
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
elif attr in self.views:
return self.views[attr]
raise AttributeError
def __getitem__(self, item):
return self.views[item]
def __iter__(self):
for name, dv in sorted(self.views.items(), key=lambda x: x[1].priority):
yield name, dv
def __repr__(self):
views = sorted(self.views.items(), key=lambda x: x[1].priority)
return "<Dataset with views [%s]>"%(', '.join([n for n, d in views]))
def __len__(self):
return len(self.views)
def __dir__(self):
return list(self.__dict__.keys()) + list(self.views.keys())
@classmethod
def from_file(cls, filename):
ds = cls()
ds.h5 = h5py.File(filename, 'r')
db.auxfile = ds
#detect stray datasets which were not written by pycortex
for name, node in ds.h5.items():
if name in ("data", "subjects", "views"):
continue
try:
ds.views[name] = _from_hdf_data(ds.h5, name)
except KeyError:
print('No metadata found for "%s", skipping...'%name)
#load up the views generated by pycortex
for name, node in ds.h5['views'].items():
try:
ds.views[name] = Dataview.from_hdf(node)
except Exception:
import traceback
traceback.print_exc()
db.auxfile = None
return ds
def uniques(self, collapse=False):
"""Return the set of unique BrainData objects contained by this dataset"""
| uniques = set()
for name, view in self:
for sv in view.uniques(collapse=collapse):
uniques.add(sv)
return uniques
def save(self, filename=None, pack=False):
if filename is not None:
self.h5 = h5py.File(filename, 'a')
elif self.h5 is None:
raise ValueError("Must provide filename for new datasets")
for n | ame, view in self.views.items():
view._write_hdf(self.h5, name=name)
if pack:
subjs = set()
xfms = set()
masks = set()
for view in self.views.values():
for data in view.uniques():
subjs.add(data.subject)
if isinstance(data, Volume):
xfms.add((data.subject, data.xfmname))
#custom masks are already packaged by default
#only string masks need to be packed
if isinstance(data._mask, str):
masks.add((data.subject, data.xfmname, data._mask))
_pack_subjs(self.h5, subjs)
_pack_xfms(self.h5, xfms)
_pack_masks(self.h5, masks)
self.h5.flush()
def get_surf(self, subject, type, hemi='both', merge=False, nudge=False):
if hemi == 'both':
left = self.get_surf(subject, type, "lh", nudge=nudge)
right = self.get_surf(subject, type, "rh", nudge=nudge)
if merge:
pts = np.vstack([left[0], right[0]])
polys = np.vstack([left[1], right[1]+len(left[0])])
return pts, polys
return left, right
try:
if type == 'fiducial':
wpts, polys = self.get_surf(subject, 'wm', hemi)
ppts, _ = self.get_surf(subject, 'pia', hemi)
return (wpts + ppts) / 2, polys
group = self.h5['subjects'][subject]['surfaces'][type][hemi]
pts, polys = group['pts'][:].copy(), group['polys'][:].copy()
if nudge:
if hemi == 'lh':
pts[:,0] -= pts[:,0].max()
else:
pts[:,0] -= pts[:,0].min()
return pts, polys
except (KeyError, TypeError):
raise IOError('Subject not found in package')
def get_xfm(self, subject, xfmname):
try:
group = self.h5['subjects'][subject]['transforms'][xfmname]
return Transform(group['xfm'][:], tuple(group['xfm'].attrs['shape']))
except (KeyError, TypeError):
raise IOError('Transform not found in package')
def get_mask(self, subject, xfmname, maskname):
try:
group = self.h5['subjects'][subject]['transforms'][xfmname]['masks']
return group[maskname]
except (KeyError, TypeError):
raise IOError('Mask not found in package')
def get_overlay(self, subject, type='rois', **kwargs):
try:
group = self.h5['subjects'][subject]
if type == "rois":
tf = tempfile.NamedTemporaryFile()
tf.write(group['rois'][0])
tf.seek(0)
return tf
except (KeyError, TypeError):
raise IOError('Overlay not found in package')
raise TypeError('Unknown overlay type')
def prepend(self, prefix):
"""Adds the given `prefix` to the name of every data object and returns
a new Dataset.
"""
ds = dict()
for name, data in self:
ds[prefix+name] = data
return Dataset(**ds)
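# Illustrative usage (not part of the module), assuming `vol1` and `vol2`
# are Dataview objects such as cortex.Volume instances:
# ds = Dataset(first=vol1, second=vol2)
# ds.save("bundle.hdf", pack=True)  # pack bundles surfaces/xfms/masks too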
def normalize(data):
if isinstance(data, (Dataset, Dataview)):
return data
elif isinstance(data, dict):
return Dataset(**data)
elif isinstance(data, str):
return Dataset.from_file(data)
elif isinstance(data, tuple):
return _vnorm(data)
raise TypeError('Unknown input type')
def _pack_subjs(h5, subjects):
for subject in subjects:
rois = db.get_overlay(subject, modify_svg_file=False)
rnode = h5.require_dataset("/subjects/%s/rois"%subject, (1,),
dtype=h5py.special_dtype(vlen=str))
rnode[0] = rois.toxml(pretty=False)
surfaces = db.get_paths(subject)['surfs']
for surf in surfaces.keys():
for hemi in ("lh", "rh"):
pts, polys = db.get_surf(subject, surf, hemi)
group = "/subjects/%s/surfaces/%s/%s"%(subject, surf, hemi)
_hdf_write(h5, pts, "pts", group)
_hdf_write(h5, polys, "polys", group)
def _pack_xfms(h5, xfms):
for subj, xfmname in xfms:
xfm = db.get_xfm(subj, xfmname, 'coord')
group = "/subjects/%s/transforms/%s"%(subj, xfmname)
node = _hdf_write(h5, np.array(xfm.xfm), "xfm", group)
node.attrs['shape'] = xfm.shape
def _pack_masks(h5, masks):
for subj, xfm, maskname in masks:
mask = db.get_mask(subj, xfm, maskname)
group = "/subjects/%s/transforms/%s/masks"%(subj, xfm)
_hdf_write(h5, mask, maskname, group)
|
citibeth/twoway | stieglitz/gic2stieglitz.py | Python | gpl-3.0 | 1,293 | 0.010054 | import os
import sys
import numpy as np
from giss.ncutil import copy_nc
import netCDF4
import argparse
from modele.constants import SHI,LHM,RHOI,RHOS,UI_ICEBIN,UI_NOTHING
parser = argparse.ArgumentParser(description= | 'Convert GIC file for old snow/firn model to one for Stieglitz.')
parser.add_argument('igic',
help="Name of classic GIC file to read")
parser.add_argument('--dir', '-d', dest='dir', default=' | .',
help='Directory in which to look for input file and --output dir')
parser.add_argument('--output', '-o', dest='ogic', required=True,
default=None,
help="Name of output Stieglitz GIC file to write. (Or directory if it ends in a slash)")
#parser.add_argument('--nlice', '-n', dest='nlice', type=int, default=5,
# help="Number of layers to create in Stieglitz model")
nlice = 5 # Hard-coded for now
args = parser.parse_args()
os.chdir(args.dir)
if args.ogic.endswith(os.sep): # User meant a directory
if os.path.islink(args.igic):
leaf = os.path.split(os.readlink(args.igic))[1]
else:
leaf = os.path.split(args.igic)[1]
os.makedirs(args.ogic, exist_ok=True)
ogic = os.path.join(args.ogic, os.path.splitext(leaf)[0] + '_stieglitz.nc')
else:
ogic = args.ogic
# NOTE: the gic2stieglitz module is assumed to be importable from here
# (it is never imported above); the computed `ogic` path is what the
# if/else block prepares, so it is passed instead of the raw args.ogic.
gic2stieglitz.gic2stieglitz(args.igic, ogic, nlice)
|
dgelessus/pythonista-scripts | wannabetabs.py | Python | mit | 2,450 | 0.006531 | # -*- coding: utf-8 -*-
###############################################################################
# wannabetabs by dgelessus
###############################################################################
import editor
import os
import ui
def full_path(path):
# Return absolute path with expanded ~ and symlinks, input path assumed relative to cwd
return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
def current_path():
# path to file currently open in editor
return "/private" + editor.get_path()
def current_relpath():
# like current_path, but relative to Script Library
return os.path.relpath(current_path(), os.path.expanduser("~/Documents"))
def tb_button_action(sender):
# generic action for toolbar buttons
if sender in tab_list.left_button_items:
root_view.close()
elif sender in tab_list.right_button_items:
if sender.title == "Edit":
tab_list.set_editing(True)
sender.title = "Done"
elif sender.title == "Done":
tab_list.set_editing(False)
sender.title = "Edit"
elif sender == tab_list.right_button_items[2]:
tab_set = set(tab_ds.items)
tab_set.add(current_relpath())
tab_ds.items = list(tab_set)
def ds_action(sender):
# generic action for data source
if sender.selected_row >= 0:
editor.open_file(sender.items[sender.selected_row])
if __name__ == "__main__":
tab_set = set([current_relpath()])
root_view = ui.View(flex="WH")
root_view.width = 200
root_view.border_color = 0.7
root_view.border_width = 1
tab_ds = ui.ListDataSource(list(tab_set))
tab_ds.delete_enabled = True
tab_ds.move_enabled = True
tab_ds.action = ds_action
tab_list = ui.TableView(flex="WH")
tab_list.data_source = tab_ds
tab_list.delegate = tab_ds
tab_list.left_button_items = | ui.ButtonItem(title=" ", action=tb_button_action),
tab_list.right_button_items = (ui.ButtonItem(title=" ", action=tb_button_action), ui.ButtonItem(title="Edit", action=tb_button_action), ui.ButtonItem(image=ui.Image.named("ionicons-ios7 | -plus-empty-32"), action=tb_button_action))
nav = ui.NavigationView(tab_list, flex="WH")
nav.navigation_bar_hidden = False
root_view.add_subview(nav)
root_view.present("sidebar")
nav.width = root_view.width
nav.height = root_view.height
|
depop/celery-message-consumer | event_consumer/test_utils/handlers.py | Python | apache-2.0 | 1,527 | 0 | import logging
from event_consumer.conf import settings
from event_consumer.errors import PermanentFailure
from event_consumer.handlers import message_handler
_logger = logging.getLogger(__name__)
class IntegrationTestHandlers(object):
"""
Basic message handlers that log or raise known exceptions to allow
interactive testing of the RabbitMQ config.
"""
@staticmethod
def py_integration_ok(body):
"""
Should always succeed, never retry, never archive.
"""
msg = 'py_integration_ok, {}'.format(body)
_logger.info(msg)
@staticmethod
def py_integration_raise(body):
"""
Should retry until there are no attempts left, then archive.
"""
msg = 'py_integration_raise, {}'.format(body)
_logger.info(msg)
raise Exception(msg)
@staticmethod
def py_integration_raise_permanent(body):
"""
Should cause the message to be archived on first go.
"""
msg = 'py_integration_raise_permanent, {}'.format(body)
_logger.info(msg)
raise PermanentFailure(ms | g)
if settings.TEST_ENABLED:
# Add tasks for interactive testing (call decorators directly)
message_handler('py.integration.ok')(
| IntegrationTestHandlers.py_integration_ok)
message_handler('py.integration.raise')(
IntegrationTestHandlers.py_integration_raise)
message_handler('py.integration.raise.permanent')(
IntegrationTestHandlers.py_integration_raise_permanent)
|
szha/mxnet | tests/python/gpu/test_kvstore_gpu.py | Python | apache-2.0 | 6,074 | 0.003787 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
import os
import mxnet as mx
import numpy as np
import pytest
from mxnet.test_utils import assert_almost_equal, default_device, environment
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
shape = (4, 4)
keys = [5, 7, 11]
str_keys = ['b', 'c', 'd']
def init_kv_with_str(stype='default', kv_type='local'):
"""init kv """
kv = mx.kv.create(kv_type)
# single
kv.init('a', mx.nd.zeros(shape, stype=stype))
# list
kv.init(str_keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
return kv
# 1. Test seed 89411477 (module seed 1829754103) resulted in a py3-gpu CI runner core dump.
# 2. Test seed 1155716252 (module seed 1032824746) resulted in a py3-dnnl-gpu error:
#    src/operator/nn/dnnl/dnnl_base.cc:567: Check failed: similar
# Neither is reproducible, so this test is back on random seeds.
@pytest.mark.skipif(mx.device.num_gpus() < 2, reason="test_rsp_push_pull needs more than 1 GPU")
@pytest.mark.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14189")
@pytest.mark.serial
def test_rsp_push_pull():
def check_rsp_push_pull(kv_type, sparse_pull, is_push_cpu=True):
kv = init_kv_with_str('row_sparse', kv_type)
kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))
push_ctxs = [mx.cpu(i) if is_push_cpu else mx.gpu(i) for i in range(2)]
kv.push('e', [mx.nd.ones(shape, ctx=context).tostype('row_sparse') for context in push_ctxs])
def check_rsp_pull(kv, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
count = len(ctxs)
num_rows = shape[0]
row_ids = []
all_row_ids = np.arange(num_rows)
vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
if is_same_rowid:
row_id = | np.random.randint(num_rows, size=num_rows)
row_ids = [mx.nd.array(row_id)] * count
elif use_slice:
total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
else:
for _ in range(count):
row_id = np.random.randint(num_rows, size=num_rows)
row_ | ids.append(mx.nd.array(row_id))
row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
vals_to_pull = vals[0] if len(vals) == 1 else vals
kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
for val, row_id in zip(vals, row_ids):
retained = val.asnumpy()
excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
for row in range(num_rows):
expected_val = np.zeros_like(retained[row])
expected_val += 0 if row in excluded_row_ids else 2
assert_almost_equal(retained[row], expected_val)
if sparse_pull is True:
kv.pull('e', out=vals_to_pull, ignore_sparse=False)
for val in vals:
retained = val.asnumpy()
expected_val = np.zeros_like(retained)
expected_val[:] = 2
assert_almost_equal(retained, expected_val)
check_rsp_pull(kv, [mx.gpu(0)], sparse_pull)
check_rsp_pull(kv, [mx.cpu(0)], sparse_pull)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull, use_slice=True)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull, use_slice=True)
envs = [None, '1']
key = 'MXNET_KVSTORE_USETREE'
for val in envs:
with environment(key, val):
            if val == '1':
sparse_pull = False
else:
sparse_pull = True
check_rsp_push_pull('local', sparse_pull)
check_rsp_push_pull('device', sparse_pull)
check_rsp_push_pull('device', sparse_pull, is_push_cpu=False)
def test_row_sparse_pull_single_device():
kvstore = mx.kv.create('device')
copy = mx.nd.random_normal(shape=(4,4), ctx=mx.gpu(0))
grad = copy.tostype("row_sparse")
key = 0
kvstore.init(key, grad)
idx = grad.indices
kvstore.push(key, grad)
kvstore.row_sparse_pull(key, out=grad, row_ids=idx)
assert_almost_equal(grad.asnumpy(), copy.asnumpy())
@pytest.mark.serial
def test_rsp_push_pull_large_rowid():
num_rows = 793470
val = mx.nd.ones((num_rows, 1)).tostype('row_sparse').copyto(mx.gpu())
kv = mx.kv.create('device')
kv.init('a', val)
out = mx.nd.zeros((num_rows,1), stype='row_sparse').copyto(mx.gpu())
kv.push('a', val)
kv.row_sparse_pull('a', out=out, row_ids=mx.nd.arange(0, num_rows, dtype='int64'))
assert(out.indices.shape[0] == num_rows)
|
nre/Doxhooks | tests/unit_tests/conftest.py | Python | mit | 1,575 | 0 | import io
from pytest import fixture
class _FakeOutputFile(io.StringIO):
def close(self):
self.contents = self.getvalue()
super().close()
@fixture
def fake_output_file():
return _FakeOutputFile()
type_example_values = {
"none": (None,),
"callable": (lambda: None,),
"bool": (True, False),
"digit_int": (0, 2,),
"float": (0.0, 1.2,),
"bytes": (b"", b"bytes"),
"str": ("", "string"),
"list": ([], [None]),
"tuple": ((), (None,)),
"dict": ({}, {None: None}),
"set": (set(), {None}),
}
basic_types = set([type_name for type_name in type_example_values])
class TypeSets:
int = {"digit_int", "bool"}
# number = int | {"float"}
# digits = {"digit_int", "float"}
# sequence = {"bytes", "str", "list", "tuple"}
# iterable = sequence | {"dict", "set"}
# subscriptable = sequence | {"dict"}
# hashable = basic_types - {"list", "dict", "set"}
for type_name in basic_types:
setattr(TypeSets, type_name, {type_name})
def values_not_from_types(*type_names):
excluded_basic_types = [getattr(TypeSets, name) for name in type_names]
rem | aining_basic_types = basic_types.difference(*excluded_basic_types)
example_values = []
for basic_type in remaining_basic_types:
example_values.extend(type_example_values[basic_type]) |
return example_values
@fixture(params=values_not_from_types("int", "none"))
def not_int_or_none(request):
return request.param
@fixture(params=values_not_from_types("str"))
def not_str(request):
return request.param
|
keon/algorithms | algorithms/search/jump_search.py | Python | mit | 1,064 | 0.00565 | """
Jump Search
Find an element in a sorted array.
"""
import math
def jump_search(arr, target):
    """
    Worst-case complexity: O(√n)
    As with binary search, all items in the list must be sorted.
    Finds the block that contains the target value, then searches that block
    linearly. Returns the index of the first occurrence of the target value
    in the array, or -1 if the target is not present.
    reference: https://en.wikipedia.org/wiki/Jump_search
| """
length = len(arr)
block_size = int(math.sqrt(length))
block_prev = 0
    block = block_size
# return -1 means that array doesn't contain target value
# find block that contai | ns target value
if arr[length - 1] < target:
return -1
while block <= length and arr[block - 1] < target:
block_prev = block
block += block_size
# find target value in block
    while arr[block_prev] < target:
        block_prev += 1
        if block_prev == min(block, length):
            return -1
    # if the target value is in the array, return its index
    if arr[block_prev] == target:
return block_prev
return -1
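# Example (illustrative):
# jump_search([1, 3, 5, 7, 9, 11], 7)  # -> 3 (index of the first 7)
# jump_search([1, 3, 5, 7, 9, 11], 4)  # -> -1 (not present)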
|
YannChemin/wxGIPE | RS_functions/evapo_pot_rs.py | Python | unlicense | 2,337 | 0.035944 | """
Generic remote sensing based ET potential using radiation
"""
def solarday(latitude, doy, tsw ):
"""
Average Solar Diurnal Radiation after Bastiaanssen (1995)
tsw = 0.7 generally clear-sky Single-way transmissivity of the atmosphere [0.0-1.0]
solarday(latitude, doy, tsw )
"""
PI=3.1415927
ds = 1.0 + 0.01672 * sin(2*PI*(doy-93.5)/365.0)
delta = 0.4093*sin((2*PI*doy/365)-1.39)
    temp = latitude * PI / 180.0
ws = acos(-tan(temp)*tan(delta*PI/180.0))
cosun = ws*sin(delta*PI/180.0)*sin(temp)+cos(delta*PI/180.0)*cos(temp)*sin(ws)
result = ( cosun * 1367 * tsw ) / ( PI * ds * ds )
return result
def solarday3d( latitude, doy, tsw, slope, aspect ):
"""
    Average Solar Diurnal Radiation after Bastiaanssen (1995)
    Includes slope and aspect correction
"""
PI = 3.1415927
ds = 1.0 + 0.01672 * sin(2 * PI * (doy - 93.5) / 365.0)
delta = 0.4093 * sin((2 * PI * doy / 365) - 1.39)
deltarad = delta * PI / 180.0
latrad = latitude * PI / 180.0
slrad = slope * PI / 1 | 80.0
asprad = aspect * PI / 180.0
ws = acos(-tan(latrad)*tan(deltarad))
temp1 = sin(deltarad) * sin(latrad) * cos(slrad)
temp2 = sin(deltarad) * cos(latrad) * sin(slrad) * cos(asprad)
temp3 = cos(deltarad) * cos(latrad) * cos(slrad) * cos(ws*PI/180.0)
temp4 = cos(deltarad) * sin(slrad) * cos(asprad) * cos(ws*PI/18 | 0.0)
temp5 = cos(deltarad) * sin(slrad) * sin(asprad) * sin(ws*PI/180.0)
costheta = (temp1 - temp2 + temp3 + temp4 + temp5) / cos(slrad)
result = ( costheta * 1367 * tsw ) / ( PI * ds * ds )
return result
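# Illustrative call (not in the original file): average diurnal clear-sky
# radiation at 45 deg N latitude on day-of-year 180 with tsw = 0.7, using
# solarday() defined above:
# rad = solarday(45.0, 180, 0.7)  # -> average diurnal radiation in W/m2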
def rnetday( albedo, solarday, tsw ):
"""
Average Diurnal Net Radiation after Bastiaanssen (1995)
tsw = 0.7 generally clear-sky Single-way transmissivity of the atmosphere [0.0-1.0]
output in W/m2
rnetday( albedo, solarday, tsw )
"""
    result = ((1.0 - albedo)*solarday)-(110.0*tsw)
return result
def etpotday( albedo, solarday, temperature, tsw, roh_water ):
"""
Average Diurnal Potential ET after Bastiaanssen (1995) in mm/day
tsw = 0.7 generally clear-sky Single-way transmissivity of the atmosphere [0.0-1.0]
roh_water = 1005 generally for non muddy Density of water (~1000 g/m3)
etpotday( albedo, solarday, temperature, tsw, roh_water )
"""
    latent = (2.501 - (0.002361 * (temperature - 273.15))) * 1000000.0
    result = ((((1.0 - albedo) * solarday) - (110.0 * tsw)) * 86400.0 * 1000.0) / (latent * roh_water)
return result |
sanja7s/EEDC | src/timelines/node_plug_timeline.py | Python | apache-2.0 | 10,157 | 0.033278 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
author: sanja7s
---------------
plot the distribution
"""
import os
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from collections import defaultdict
from matplotlib import colors
from pylab import MaxNLocator
import pylab as pl
from mpl_toolkits.axes_grid import inset_locator
matplotlib.style.use('ggplot')
IN_DIR = "../../data/timelines"
os.chdir(IN_DIR)
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
grid = {'color' : 'gray',
'alpha' : 0.5,
'linestyle' : '-.'}
lines = {'color' : 'gray'}
#xticks = {'color' : 'gray'}
matplotlib.rc('font', **font)
matplotlib.rc('grid', **grid)
matplotlib.rc('lines', **lines)
#matplotlib.rc('ticks', **ticks)
def read_in_plug_data(node):
f_in = 'node_' + node +'_plug.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
# n are irrelevant
n, n, n, t, n, plug, n, n, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
plug = float(plug)
distr[t] = plug
return distr
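# (Assumed input format, inferred from the parsing above: lines with
# double-quoted fields where the 4th '"'-separated token is a unix timestamp
# and the 6th is the plug power reading -- illustrative, the real CSV may differ.)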
def read_in_num_jobs_data(node):
f_in = 'node_' + node +'_plug.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, jobs_list, n6 = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
jobs = jobs_list.split(',')
if jobs_list == "":
distr[t] = 0
else:
distr[t] = len(jobs)
return distr
def read_in_CPU_data(node):
f_in = 'node_' + node +'_CPUMEM.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, CPU1, n, CPU2, n, n, n, n, n | = line.strip().split('"')
t = dt.datetime.fromtimestamp(in | t(t))
CPU1 = float(CPU1)
CPU2 = float(CPU2)
distr[t] = (CPU1, CPU2)
return distr
def read_in_MEM_data(node):
f_in = 'node_' + node +'_CPUMEM.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, n, n, n, n, MEM1, n, MEM2, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
MEM1 = float(MEM1)
MEM2 = float(MEM2)
distr[t] = (MEM1, MEM2)
return distr
def read_in_rb_data(node):
f_in = 'node_' + node +'_rb.csv'
distr = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
n, n, n, t, n, n, n, r, n, b, n = line.strip().split('"')
t = dt.datetime.fromtimestamp(int(t))
r = int(r)
b = int(b)
distr[t] = (r, b)
return distr
def plot_plug_timeline(node):
print 'Plotting plug values'
d = read_in_plug_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = [v if v > 0 else 0 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax = plt.subplots()
ax.scatter(X, values, marker='s', s=1)
#ax.plot(X, values)
fig.autofmt_xdate()
ax.set_xlabel('time')
ax.set_ylabel('plug value')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
#plt.show()
plt.savefig('plug_timeline_node_' + node + '.png')
return fig, ax, plt
def plot_plug_timeline_v2(node):
print 'Plotting plug values'
d = read_in_plug_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = [v if v > 0 else 0 for v in d.values()]
ts = pd.Series(values, index = X)
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax = plt.subplots()
ts.plot(color = 'darkblue')
for tl in ax.get_yticklabels():
tl.set_color('darkblue')
fig.autofmt_xdate()
ax.set_xlabel('time')
ax.set_ylabel('plug value', color='darkblue')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
ymin = 240
ymax = 280
if min(values) < 160:
ymin = min(values) - 10
if max(values) > 250:
ymax = max(values) + 10
plt.ylim(ymin, ymax)
#plt.savefig(cwd + '/multiple_v2/plug_only/plug_timeline_node_' + node + '_v2.png')
return fig, ax, plt
def plot_plug_and_num_jobs_timeline(node):
print 'Plotting num of jobs values'
d = read_in_num_jobs_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values = d.values()
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print min(values), max(values)
fig, ax1, plt = plot_plug_timeline_v2(node)
ax2 = ax1.twinx()
ax2.scatter(X, values,
marker='s', color='red', s=7)
ax2.set_ylabel('# of jobs', color='red')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('r')
cwd = os.getcwd()
print cwd
plt.savefig(cwd + '/lowest_norm_stdev/SandyBridge/num_jobs_and_plug_timeline_node_' + node + '_v2.png')
def plot_plug_and_CPUs_timeline(node):
print 'Plotting CPUs values'
d = read_in_CPU_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values1 = []
values2 = []
for el in d.values():
if el[0] > 0:
v1 = el[0]
else:
v1 = 0
values1.append(v1)
if el[1] > 0:
v2 = el[1]
else:
v2 = 0
values2.append(v2)
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max CPU1 ', min(values1), max(values1)
print 'Min and max CPU2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline_v2(node)
ax2 = ax1.twinx()
ts1 = pd.Series(values1, index = X)
ax2.scatter(X, values1, marker='s', color='red', s=4, label = 'CPU1')
#ts1.plot(color='red', label = 'CPU1')
ts2 = pd.Series(values2, index = X)
ax2.scatter(X, values2, marker='s', color='magenta', s=4, label = 'CPU2')
#ts2.plot(color='magenta', label = 'CPU2')
ax2.set_ylabel('CPU values', color='red')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('r')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=3)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('lowest_norm_stdev/SandyBridge/CPUs_plug_timeline_node_' + node + '.png')
def plot_plug_and_MEM_timeline(node):
print 'Plotting DRAM values'
d = read_in_MEM_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values1 = [v[0] if v[0] > -1 else -1 for v in d.values()]
values2 = [v[1] if v[1] > -1 else -1 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max MEM1 ', min(values1), max(values1)
print 'Min and max MEM2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node)
ax2 = ax1.twinx()
ax2.scatter(X, values1,
marker='s', color='darkgreen', s=4, label = 'DRAM1')
ax2.scatter(X, values2,
marker='s', color='olive', s=4, label = 'DRAM2')
ax2.set_ylabel('DRAM values', color='olive')
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('olive')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=1)
for text in l.get_texts():
text.set_color('gray')
plt.savefig('MEM_plug_timeline_node_' + node + '.png')
def plot_plug_and_rb_timeline(node):
print 'Plotting r b values'
d = read_in_rb_data(node)
dates = d.keys()
X = pd.to_datetime(dates)
values1 = [v[0] if v[0] > 0 else 0 for v in d.values()]
values2 = [v[1] if v[1] > 0 else 0 for v in d.values()]
start_time = min(dates)
end_time = max(dates)
print start_time, end_time
print 'Min and max MEM1 ', min(values1), max(values1)
print 'Min and max MEM2 ', min(values2), max(values2)
fig, ax1, plt = plot_plug_timeline(node)
ax2 = ax1.twinx()
ax2.scatter(X, values1,
marker='s', color='tomato', s=3, label = 'r')
ax2.scatter(X, values2,
marker='s', color='sage', s=3, label = 'b')
ax2.set_ylabel('r and b values', color='sage')
ya = ax2.get_yaxis()
ya.set_major_locator(MaxNLocator(integer=True))
plt.xlim(pd.to_datetime(start_time), pd.to_datetime(end_time))
for tl in ax2.get_yticklabels():
tl.set_color('sage')
handles, labels = ax2.get_legend_handles_labels()
l = ax2.legend(handles, labels, loc=1)
for text in l.get_texts |
AnthonyCalandra/modern-cpp-features | auto-generate-readme.py | Python | mit | 2,305 | 0.000868 | from pathlib import Path
class MarkdownParser():
def __init__(self, text):
self.text = text
se | lf.lines = text.split('\n')
def title(self):
return self.lines[0].split(' ')[1]
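    # (Illustrative) for a document whose first line is '# C++11',
    # title() returns 'C++11'.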
def header(self, name, level, include_header=False):
start = False
end = False
content = []
mark = '#' * level
for line in self.lines:
if start and not end:
            end |= (f'{mark} ' in line[:(level + 1)]) and (f'{mark} {name}' not in line)
if end:
start = False
| else:
content.append(line)
else:
start = (f'{mark} {name}' in line)
if start:
end = False
if include_header:
content.append(line)
content = '\n'.join(content)
return content
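    # (Illustrative) header('Overview', 2) collects the lines between the
    # '## Overview' heading and the next level-2 heading; with
    # include_header=True the heading line itself is kept as well.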
def overview(self):
overview = self.header('Overview', 2)
overview = overview.split('\n')
overview = '\n'.join(overview[1:]) # remove the first line
return overview
def features(self):
return self.header('C++', 2, True)
def combine(text, parsers):
overview = ''
features = ''
title = ''
for p in parsers:
title += p.title().replace('C++', '') + '/'
overview += p.overview() + '\n'
features += p.features() + '\n'
title = title[:-1]
overview = overview.replace('README.md#', '#')
features = features.replace('README.md#', '#')
text = text.replace('# C++\n', f'# C++{title}\n')
text = text.replace(f'<!-- overview -->', overview)
text = text.replace(f'<!-- features -->', features)
return text
def main():
src_dir = Path(__file__).parent
parsers = []
srcs = list(src_dir.glob('CPP*.md'))
srcs.sort(reverse=True)
for file in srcs:
with open(file, 'r') as fp:
text = fp.read()
p = MarkdownParser(text)
parsers.append(p)
template_file = src_dir / 'readme-template.md'
with open(template_file, 'r') as fp:
text = fp.read()
text = combine(text, parsers)
readme_file = src_dir / 'README.md'
with open(readme_file, 'w') as fp:
fp.write(text)
if __name__ == '__main__':
main()
|
gramhagen/emojibot | tests/utils/test_response.py | Python | mit | 164 | 0 | # -*- coding: utf-8 -*-
from emojibot.utils.response import Response
d | ef test_constructor():
response = Response()
assert i | sinstance(response, Response)
|
JiapengLi/pqcom | pqcom/util.py | Python | mit | 387 | 0.002584 |
import sys
import os
import pk | g_resources
VERSION = 0.5
script_path = os.path.dirname(sys.argv[0])
def resource_path(relative_path):
base_path = getattr(sys, '_MEIPASS', script_path)
full_path = os.path.join(base_path, rel | ative_path)
if os.path.isfile(full_path):
return full_path
else:
return pkg_resources.resource_filename(__name__, relative_path)
|
duke605/RunePy | commands/portables.py | Python | mit | 4,554 | 0.002855 | from secret import GOOGLE_API_KEY
from datetime import datetime
from util.arguments import Arguments
from discord.ext import commands
from shlex import split
from util.choices import enum
from collections import namedtuple
import util
import re
import urllib
import discord
class Portables:
def __init__(self, bot):
self.bot = bot
@staticmethod
def _format_data(json):
date_format = '%d %b, %H:%M'
struct = namedtuple('Portable', ['author', 'last_updated', 'locations', 'time'])
# Populating portables
time = datetime.strptime(json['values'][2][1], date_format).replace(year=datetime.utcnow().year)
author = json['values'][2][3]
last_updated = util.format_timedelta(datetime.utcnow() - time, short_names=True)
locations = {'fletchers': {}, 'crafters': {}, 'braziers': {}, 'sawmills': {}, 'forges': {}, 'ranges': {}, 'wells': {}}
# Finding all worlds for portables
for i in range(7):
worlds = locations[json['values'][0][i].strip().lower()]
locs = json['values'][1][i]
# Checking if no worlds
if 'host needed' in locs.lower() or 'n/a' in locs.lower():
continue
# Separating locations
            for location in re.findall(r'\d+.+?(?:CA|MG|PRIFF|PRIF|P|BU|SP|CW|BA)', locs.upper()):
                name = location.split(' ')[-1]
                name = re.sub('(?:PRIFF|PRIF)', 'P', name, flags=re.I)
                worlds[name] = re.findall(r'\d+', location)
return struct(author=author, locations=locations, last_updated=last_updated, time=time)
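    # Assumed sheet layout, inferred from the indexing above (illustrative):
    # row 0 holds the seven portable type names, row 1 their location strings,
    # and row 2 the update timestamp and author name.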
@staticmethod
async def _get_portables(http):
"""
Gets data from the google spreadsheet
"""
host = 'https://sheets.googleapis.com/v4/spreadsheets'
sheet_id = '16Yp-eLHQtgY05q6WBYA2MDyvQPmZ4Yr3RHYiBCBj2Hc'
sheet_name = 'Home'
range = 'A16:G18'
url = '%s/%s/values/%s!%s?key=%s' % (host, sheet_id, sheet_name, range, GOOGLE_API_KEY)
# Getting cells
async with http.get(url) as r:
# Checking request
if r.status != 200:
return None
return Portables._format_data(await r.json())
@commands.command(pass_context=True, aliases=['ports', 'port', 'portable'], description='Shows portable locations.')
async def portables(self, ctx, *, msg: str = ''):
ports = {
'fletcher': ('fletchers', 'fletch'),
'crafter': ('crafters', 'craft'),
'brazier': ('braziers', 'braz'),
'sawmill': ('saw', 'mill', 'sawmills'),
'forge': ('forges',),
'range': ('ranges', 'cook'),
'well': ('wells',)
}
parser = Arguments(allow_abbrev=False, prog='portables')
parser.add_argument('portable', nargs='?', type=enum(**ports), help='Selects a type of portable to search for.')
# Parsing arguments
await self.bot.send_typing(ctx.message.channel)
try:
args = parser.parse_args(split(msg))
except SystemExit:
await self.bot.say('```%s```' % parser.format_help())
return
except Exception as e:
await self.bot.say('```%s```' % str(e))
return
# Get portables | from google sheet
portables = await Portables._get_portables(self.bot.whttp)
if not portables:
await self.bot.say('Google sheet could not be reached.')
return
# Building message
e = discord.Embed()
e.colour = 0x3572a7
e.timestamp = portables.time
e.set_footer(text='Updated %s ago' % portables.last_updated)
e.set_author(name=portables | .author,
icon_url='http://services.runescape.com/m=avatar-rs/%s/chat.png' % urllib.parse.quote(portables.author))
# Adding portable locations
for portable, locations in portables.locations.items():
# Skipping if no the portable requested
if args.portable and args.portable not in portable:
continue
# No location for portable
if not locations:
e.add_field(name=portable.capitalize(), value='N/A')
continue
value = '\n'.join(['%s %s' % (', '.join(worlds), location) for location, worlds in locations.items()])
e.add_field(name=portable.capitalize(), value=value)
await self.bot.say(embed=e)
def setup(bot):
bot.add_cog(Portables(bot))
|
robocomp/robocomp | tools/cli/robocompdsl/robocompdsl/templates/templateCPP/plugins/agm/functions/src/specificworker_cpp.py | Python | gpl-3.0 | 3,615 | 0.018534 | import datetime
from string import Template
import robocompdsl.dsl_parsers.parsing_utils as p_utils
from robocompdsl.templates.templateCPP.plugins.base.functions import function_utils as utils
from robocompdsl.templates.common.templatedict import TemplateDict
AGM_INNERMODEL_ASSOCIATION_STR = """\
innerModel = std::make_shared<InnerModel>(new InnerModel());
try
{
RoboCompAGMWorldModel::World w = agmexecutive_proxy->getModel();
AGMExecutiveTopic_structuralChange(w);
}
catch(...)
{
printf("The executive is probably not running, waiting for first AGM model publication...");
}
"""
SET_PARAMETERS_AND_POSSIBLE_ACTIVATION = """
bool SpecificWorker::setParametersAndPossibleActivation(const RoboCompAGMCommonBehavior::ParameterMap &prs, bool &reactivated)
{
printf("<<< setParametersAndPossibleActivation\\n");
// We didn't reactivate the component
reactivated = false;
// Update parameters
params.clear();
for (RoboCompAGMCommonBehavior::ParameterMap::const_iterator it=prs.begin(); it!=prs.end(); it++)
{
params[it->first] = it->second;
}
try
{
action = params["action"].value;
std::transform(action.begin(), action.end(), action.begin(), ::tolower);
//TYPE YOUR ACTION NAME
if (action == "actionname")
{
active = true;
}
else
{
active = true;
}
}
catch (...)
{
printf("exception in setParametersAndPossibleActivation %d\\n", __LINE__);
return false;
}
// Check if we should reactivate the component
if (active)
{
active = true;
reactivated = true;
}
printf("setParametersAndPossibleActivation >>>\\n");
return true;
}
"""
SEND_MODIFICATION_PROPOSAL = """
void SpecificWorker::sendModificationProposal(AGMModel::SPtr &worldModel, AGMModel::SPtr &newModel)
{
try
{
AGMMisc::publishModification(newModel, ${proxy}, \"${agent_name}Agent\");
}
/* catch(const RoboCompAGMExecutive::Locked &e)
{
}
catch(const RoboCompAGMExecutive::OldModel &e)
{
}
catch(const RoboCompAGMExecutive::InvalidChange &e)
{
}
*/
catch(const Ice::Exception& e)
{
exit(1);
}
}
"""
class specificworker_cpp(TemplateDict):
def __init__(self, component):
super(specificworker_cpp, self).__init__()
self.component = component
self['agmagent_attributes'] = self.agmagent_attributes()
self['agm_innermodel_association'] = self.agm_innermodel_association()
self['agm_specific_code'] = self.agm_specific_code()
def agmagent_attributes(self):
result = ""
if self.component.is_agm_agent():
result += "active = false;\n"
result += "worldModel = AGMModel::SPtr(new AGMModel());\n"
result += "worldModel->name = \"worldModel\";\n"
return result
def agm_innermodel_association(self):
result = ""
if self.component.is_agm_agent():
result += AGM_INNERMODEL_ASSOCIATION_STR
return result
| def agm_specific_ | code(self):
result = ""
if ('agmagent' in [x.lower() for x in self.component.options]) and (
'innermodelviewer' in [x.lower() for x in self.component.options]):
result += REGENERATE_INNERMODEL
if 'agmagent' in [x.lower() for x in self.component.options]:
result += SET_PARAMETERS_AND_POSSIBLE_ACTIVATION
agent_name = self.component.name
if self.component.language.lower() == "cpp":
proxy = "agmexecutive_proxy"
else:
proxy = "*agmexecutive_proxy.get()"
result += Template(SEND_MODIFICATION_PROPOSAL).substitute(proxy=proxy, agent_name=agent_name)
return result |
florisvb/multi_tracker | examples/demo/demo_2/src/raw_data_bag_config.py | Python | mit | 177 | 0 | class Config:
def __init__(self):
self | .basename = 'raw_data_N2'
self.directory = '~/orchard/data'
self.topic | s = ['/multi_tracker/2/tracked_objects']
|
zhaofengli/refill | backend/refill/models/citation.py | Python | bsd-2-clause | 5,016 | 0.001595 | import dateparser
from datetime import date
class Citation:
FIELDS = {
'type': str,
'url': str,
'title': str,
'date': date,
'accessdate': date,
'year': int,
'authors': list,
'editors': list,
'publisher': str,
'work': str,
'website': str,
'archiveurl': str,
'archivedate': date,
'deadurl': bool,
'via': str,
'journal': str,
'volume': str,
'issue': str,
'pages': str, # auto-generated from pagefrom/pageto, if they exist
'pagefrom': int,
'pageto': int,
'pmid': str,
'pmc': str,
'doi': str,
'arxiv': str,
'raw': dict,
}
MAGIC_FIELDS = [
'pages',
]
# Machine-accessible locators
LOCATORS = [
'url',
'doi',
'pmc',
'pmid',
'arxiv',
]
def __init__(self, **kwargs):
self.__dict__['_data'] = {}
for field in Citation.FIELDS:
self.__resetField(field)
self.__dict__['_originalFrozen'] = False
self.__dict__['_originalFields'] = set()
self._data['type'] = 'webpage' # implicit/derived
for field, value in kwargs.items():
self[field] = value
def __setattr__(self, field: str, value: str):
if field.startswith('_'):
self.__dict__[field] = value
return
self._data[field] = self.__cleanValue(field, value)
if not self._originalFrozen:
self._originalFields.add(field)
if field == 'pages':
if 'pagefrom' in self: del self.pagefrom
if 'pageto' in self: del self.pageto
def __getattr__(self, field: str):
self.__assertValidField(field)
if field == 'pages':
if 'pagefrom' in self and 'pageto' in self and self.pagefrom != self.pageto:
self._data['pages'] = '{}-{}'.format(self.pagefrom, self.pageto)
elif 'pagefrom' in self:
self._data['pages'] = self.pagefrom
elif 'pageto' in self:
self._data['pages'] = self.pageto
return self._data[field]
def __setitem__(self, field: str, value: str):
self.__setattr__(field, value)
def __getitem__(self, field: str):
return self.__getattr__(field)
def __delattr__(self, field: str):
self.__assertValidField(field)
self.__resetField(field)
def __delitem__(self, field: str):
return self.__delattr__(field)
def __contains__(self, field: str):
if field in Citation.MAGIC_FIELDS:
return bool(getattr(self, field))
return field in self._data and bool(getattr(self, field))
def __iter__(self):
for field in Citation.FIELDS:
if field in self:
yield (field, getattr(self, field))
def __eq__(self, operand):
if not isinstance(operand, self.__class__):
return False
return self._data == operand._data
def addAuthor(self, author: str):
self.authors.append(author)
def removeAuthor(self, author: str):
self.authors.remove(author)
def merge(self, citation: 'Citation'):
for key, value in citation._data.items():
if value:
self._data[key] = value
def freezeOriginal(self):
self._originalFrozen = True
def isDerived(self, field: str) -> bool:
return not self.isOriginal(field)
def isOriginal(self, field: str) -> bool:
self.__assertValidField(field)
return field in self._originalFields
def isLocatable(self) -> bool:
return bool([field for field in Citation.LOCATORS if field in self])
# Private
def __assertValidField(self, field):
if field not in Citation.FIELDS:
raise NameError('Invalid field: {}'.format(field))
return True
def __cleanValue(self, field, value):
self.__assertValidField(field)
ftype = Citation.FIELDS[field]
if ftype is date and type(value) is str:
d = dateparser.parse(value)
if not d:
raise ValueError('Invalid date {}'.format(value))
return d.date()
elif ftype is int and type(value) is str:
if not value.isdigit():
raise ValueError('Invalid str of int {}'.format(value))
return int(value)
elif type(ftype) is list and value not in ftype:
raise ValueError('Invalid value {} - Valid values are {}'.format(value, ftype))
e | lif not type(value) is ftype:
raise ValueError('Invalid value {} for field {}'.format(value | , field))
if type(value) is str:
value = value.strip()
return value
def __resetField(self, field):
ftype = Citation.FIELDS[field]
if ftype is date:
self._data[field] = None
else:
self._data[field] = ftype()
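# Minimal usage sketch (illustrative, not part of the original module):
#   c = Citation(title='Example', url='http://example.org')
#   c.pagefrom = 10
#   c.pageto = 12
#   c.pages   # -> '10-12', derived via the 'pages' magic field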
|
apul1421/table-client-side-app-retake | src/ecommerce2/views.py | Python | gpl-3.0 | 237 | 0.016878 | from django.s | hortcuts import render
def about(request):
return render(request, "about.html", {})
def location(request):
return render(request, "location.html", { | })
def failure(request):
return render(request, "failure.html", {})
|
rh-s/heat | heat/engine/scheduler.py | Python | apache-2.0 | 18,011 | 0.000056 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import sys
import types
import eventlet
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
from six import reraise as raise_
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common import timeutils
LOG = logging.getLogger(__name__)
# Whether TaskRunner._sleep actually does an eventlet sleep when called.
ENABLE_SLEEP = True
def task_description(task):
"""
Return a human-readable string description of a task suitable for logging
the status of the task.
"""
name = task.__name__ if hasattr(task, '__name__') else None
if isinstance(task, types.MethodType):
if name is not None and hasattr(task, '__self__'):
return '%s from %s' % (name, task.__self__)
elif isinstance(task, types.FunctionType):
if name is not None:
return six.text_type(name)
return repr(task)
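# (Illustrative) task_description(obj.method) yields 'method from <obj repr>',
# while task_description(plain_function) yields 'plain_function'.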
class Timeout(BaseException):
"""
Timeout exception, raised within a task when it has exceeded its allotted
(wallclock) running time.
This allows the task to perform any necessary cleanup, as well as use a
different exception to notify the controlling task if appropriate. If the
task suppresses the exception altogether, it will be cancelled but the
controlling task will not be notified of the timeout.
"""
def __init__(self, task_runner, timeout):
"""
Initialise with the TaskRunner and a timeout period in seconds.
"""
message = _('%s Timed out') % six.text_type(task_runner)
super(Timeout, self).__init__(message)
self._duration = timeutils.Duration(timeout)
def expired(self):
return self._duration.expired()
def trigger(self, generator):
"""Trigger the timeout on a given generator."""
try:
generator.throw(self)
except StopIteration:
return True
else:
# Clean up in case task swallows exception without exiting
generator.close()
return False
def __eq__(self, other):
return not self < other and not other < self
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return other < self
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not other < self
def __lt__(self, other):
if not isinstance(other, Timeout):
return NotImplemented
return self._duration.endtime() < other._duration.endtime()
def __cmp__(self, other):
return self < other
class TimedCancel(Timeout):
def trigger(self, generator):
"""Trigger the timeout on a given generator."""
generator.close()
return False
@six.python_2_unicode_compatible
class ExceptionGroup(Exception):
'''
Container for multiple exceptions.
This exception is used by DependencyTaskGroup when the flag
    aggregate_exceptions is set to True; it is re-raised once all tasks
are finished. This way it can be caught later on so that the individual
exceptions can be acted upon.
'''
def __init__(self, exceptions=None):
if exceptions is None:
exceptions = list()
self.exceptions = list(exceptions)
def __str__(self):
return six.text_type([six.text_type(ex) for ex in self.exceptions])
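# Illustrative sketch (not part of the original module): a resumable task is
# simply a generator function that yields between steps, e.g.
#
#   def my_task():
#       do_step_one()
#       yield              # hand control back to the runner
#       do_step_two()
#
#   TaskRunner(my_task)(wait_time=None)   # run all steps without sleeping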
@six.python_2_unicode_compatible
class TaskRunner(object):
"""
Wrapper for a resumable task (co-routine).
"""
def __init__(self, task, *args, **kwargs):
"""
Initialise with a task function, and arguments to be passed to it when
it is started.
The task function may be a co-routine that yields control flow between
steps.
"""
assert callable(task), "Task is not callable"
self._task = task
self._args = args
self._kwargs = kwargs
self._runner = None
self._done = False
self._timeout = None
self.name = task_description(task)
def __str__(self):
"""Return a human-readable string representation of the task."" | "
text = 'Task %s' % self.name
return six.text_type(text)
def _sleep(self, wait_time):
"""Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None:
LOG.debug('%s sleeping' % six.text_type(self))
eventlet.sleep(wait_time)
def __call__(self, wait_time=1, timeout=None):
"""
Start and run the task to co | mpletion.
The task will first sleep for zero seconds, then sleep for `wait_time`
seconds between steps. To avoid sleeping, pass `None` for `wait_time`.
"""
self.start(timeout=timeout)
# ensure that zero second sleep is applied only if task
# has not completed.
if not self.done() and wait_time:
self._sleep(0)
self.run_to_completion(wait_time=wait_time)
def start(self, timeout=None):
"""
Initialise the task and run its first step.
If a timeout is specified, any attempt to step the task after that
number of seconds has elapsed will result in a Timeout being
raised inside the task.
"""
assert self._runner is None, "Task already started"
assert not self._done, "Task already cancelled"
LOG.debug('%s starting' % six.text_type(self))
if timeout is not None:
self._timeout = Timeout(self, timeout)
result = self._task(*self._args, **self._kwargs)
if isinstance(result, types.GeneratorType):
self._runner = result
self.step()
else:
self._runner = False
self._done = True
LOG.debug('%s done (not resumable)' % six.text_type(self))
def step(self):
"""
Run another step of the task, and return True if the task is complete;
False otherwise.
"""
if not self.done():
assert self._runner is not None, "Task not started"
if self._timeout is not None and self._timeout.expired():
LOG.info(_LI('%s timed out'), six.text_type(self))
self._done = True
self._timeout.trigger(self._runner)
else:
LOG.debug('%s running' % six.text_type(self))
try:
next(self._runner)
except StopIteration:
self._done = True
LOG.debug('%s complete' % six.text_type(self))
return self._done
def run_to_completion(self, wait_time=1):
"""
Run the task to completion.
The task will sleep for `wait_time` seconds between steps. To avoid
sleeping, pass `None` for `wait_time`.
"""
while not self.step():
self._sleep(wait_time)
def cancel(self, grace_period=None):
"""Cancel the task and mark it as done."""
if self.done():
return
if not self.started() or grace_period is None:
LOG.debug('%s cancelled' % six.text_type(self))
self._done = True
if self.started():
self._runner.close()
else:
timeout = TimedCancel(self, grace_period)
if self._timeout is None or timeout < self._timeout:
self._timeout = timeout
def started(self):
"""Return True if the task has been started." |
WebArchivCZ/Seeder | Seeder/harvests/forms.py | Python | mit | 3,363 | 0.000297 | from multiupload.fields import MultiUploadMetaField, MultiUploadMetaInput
from django import forms
from dal import autocomplete
from . import models
# Django 2 fix (https://github.com/Chive/django-multiupload/issues/31)
class PatchedMultiUploadMetaInput(MultiUploadMetaInput):
def render(self, name, value, attrs=None, renderer=None):
return super(PatchedMultiUploadMetaInput, self).render(name, value, attrs)
class PatchedMultiFileField(MultiUploadMetaField):
def __init__(self, *args, **kwargs):
super(PatchedMultiFileField, self).__init__(*args, **kwargs)
self.widget = PatchedMultiUploadMetaInput(
attrs=kwargs.pop('attrs', {}),
multiple=(self.max_num is None or self.max_num > 1),
)
autocomplete_widgets = {
'custom_sources': autocomplete.ModelSelect2Multiple(
url='source:source_autocomplete'
)
}
class HarvestCreateForm(forms.ModelForm):
class Meta:
model = models.Harvest
fields = [
'scheduled_on',
'title',
'annotation',
'archive_it',
'tests',
'target_frequency',
'custom_seeds',
'custom_sources',
'topic_collections',
'topic_collection_frequency',
]
widgets = autocomplete_widgets
class HarvestEditForm(forms.ModelForm):
class Meta:
model = models.Harvest
fields = [
'status',
'scheduled_on',
'title',
'annotation',
'archive_it',
'tests',
'target_frequency',
'custom_seeds',
'custom_sources',
'topic_collections',
'topic_collection_frequency',
]
widgets = autocomplete_widgets
class TopicCollectionForm(forms.ModelForm):
attachments = PatchedMultiFileField(min_num=0, required=False)
class Meta:
model = models.TopicCollection
fields = (
'owner',
'title_cs',
'title_en',
'annotation_cs',
'annotation_en',
'date_from',
'date_to',
'image',
'all_open',
| 'target_frequency',
'custom_seeds',
'custom_sources',
# 'slug',
'keywords',
"attachments",
)
widgets = {
'custom_sources': autocomplete.ModelSelect2Multiple(
url='source:source_public_autocomplete'
),
'keywords': autocomplete.Mod | elSelect2Multiple(
url='source:keyword_autocomplete'
),
}
class TopicCollectionEditForm(TopicCollectionForm):
files_to_delete = forms.MultipleChoiceField(required=False)
def clean_order(self):
updated_order = self.cleaned_data['order']
if updated_order < 1:
raise (forms.ValidationError("Order must be >= 1"))
return updated_order
def __init__(self, attachment_list, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['files_to_delete']._set_choices(
[(file.id, str(file)) for file in attachment_list]
)
class Meta(TopicCollectionForm.Meta):
fields = ('order',) + TopicCollectionForm.Meta.fields + \
('files_to_delete',)
|
ucloud/uai-sdk | uaitrain_tool/caffe/caffe_tool.py | Python | apache-2.0 | 3,505 | 0.003138 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
from uaitrain.operation.pack_docker_image.caffe_pack_op import CaffeUAITrainDockerImagePackOp
from uaitrain.operation.create_train_job.base_create_op import BaseUAITrainCreateTrainJobOp
from uaitrain.operation.stop_train_job.base_stop_op import BaseUAITrainStopTrainJobOp
from uaitrain.operation.delete_train_job.base_delete_op import BaseUAITrainDeleteTrainJobOp
from uaitrain.operation.list_train_job.base_list_job_op import BaseUAITrainListTrainJobOp
from uaitrain.operation.info_train_job.info_train_op import BaseUAITrainRunningInfoOp
from uaitrain.operation.get_realtime_log.base_log_op import BaseUAITrainGetRealtimeLogOp
from uaitrain.operation.list_bill_info.base_bill_op import BaseUAITrainListBillInfoOp
from uaitrain.operation.rename_train_job.base_rename_op import BaseUAITrainRenameTrainJobOp
from uaitrain.operation.get_train_job_conf.base_conf_op import BaseUAITrainTrainJobConfOp
from uaitrain.operation.get_log_topic.get_log_topic import BaseUAITrainGetLogTopicOp
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='AI Caffe Arch Deployer',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers(dest='commands', help='commands')
pack_op = CaffeUAITrainDockerImagePackOp(subparsers)
create_op = BaseUAITrainCreateTrainJobOp(subparsers)
stop_op = BaseUAITrainStopTrainJobOp(subparsers)
delete_op = BaseUAITrainDeleteTrainJobOp(subparsers)
list_op = BaseUAITrainListTrainJobOp(subparsers)
info_op = BaseUAITrainRunningInfoOp(subparsers)
log_op = BaseUAITrainGetRealtimeLogOp(subparsers)
bill_op = BaseUAITrainListBillInfoOp(subparsers)
rename_op = BaseUAITrainRenameTrainJobOp(subparsers)
conf_op = BaseUAITrainTrainJobConfOp(subparsers)
topic_op = BaseUAITrainGetLogTopicOp(subparsers)
cmd_args = vars(parser.parse_args())
if cmd_args['commands'] == 'pack':
pack_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'create':
create_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'stop':
stop_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'delete':
delete_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'list':
list_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'info':
info_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'log':
log_op.cmd_run(cmd_args)
elif cmd_args[ | 'commands'] == 'bill':
bill_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'rename':
rename_op.cmd_run(cmd_args) |
elif cmd_args['commands'] == 'conf':
conf_op.cmd_run(cmd_args)
elif cmd_args['commands'] == 'topic':
topic_op.cmd_run(cmd_args)
else:
print("Unknown CMD, please use python caffe_tool.py -h to check")
|
sunlightlabs/django-nonprofit | nonprofit/mailroom/admin.py | Python | bsd-3-clause | 207 | 0.024155 | from django.contri | b import admin
from nonprofit.mailroom.models import Slot
class SlotAdmin(admin.ModelAdmin):
list_display = ('description','forward_to','enabled')
admin.site.re | gister(Slot, SlotAdmin) |
LaoQi/icode | mypylib/remoto/lib/vendor/execnet/multi.py | Python | gpl-2.0 | 10,046 | 0.001294 | """
Managing Gateway Groups and interactions with multiple channels.
(c) 2008-2014, Holger Krekel and others
"""
import sys, atexit
from execnet import XSpec
from execnet import gateway_io, gateway_bootstrap
from execnet.gateway_base import reraise, trace, get_execmodel
from threading import Lock
NO_ENDMARKER_WANTED = object()
class Group(object):
""" Gateway Groups. """
defaultspec = "popen"
def __init__(self, xspecs=(), execmodel="thread"):
""" initialize group and make gateways as specified.
execmodel can be 'thread' or 'eventlet'.
"""
self._gateways = []
self._autoidcounter = 0
self._autoidlock = Lock()
self._gateways_to_join = []
# we use the same execmodel for all of the Gateway objects
# we spawn on our side. Probably we should not allow different
# execmodels between different groups but not clear.
# Note that "other side" execmodels may differ and is typically
# specified by the spec passed to makegateway.
self.set_execmodel(execmodel)
for xspec in xspecs:
self.makegateway(xspec)
atexit.register(self._cleanup_atexit)
@property
def execmodel(self):
return self._execmodel
@property
def remote_execmodel(self):
return self._remote_execmodel
def set_execmodel(self, execmodel, remote_execmodel=None):
""" Set the execution model for local and remote site.
execmodel can be one of "thread" or "eventlet" (XXX gevent).
It determines the execution model for any newly created gateway.
If remote_execmodel is not specified it takes on the value
of execmodel.
NOTE: Execution models can only be set before any gateway is created.
"""
if self._gateways:
raise ValueError("can not set execution models if "
"gateways have been created already")
if remote_execmodel is None:
remote_execmodel = execmodel
self._execmodel = get_execmodel(execmodel)
self._remote_execmodel = get_execmodel(remote_execmodel)
def __repr__(self):
idgateways = [gw.id for gw in self]
return "<Group %r>" %(idgateways)
def __getitem__(self, key):
if isinstance(key, int):
return self._gateways[key]
for gw in self._gateways:
if gw == key or gw.id == key:
return gw
raise KeyError(key)
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def __len__(self):
return len(self._gateways)
def __iter__(self):
return iter(list(self._gateways))
def makegateway(self, spec=None):
"""create and configure a gateway to a Python interpreter.
The ``spec`` string encodes the target gateway type
and configuration information. The general format is::
key1=value1//key2=value2//...
If you leave out the ``=value`` part a True value is assumed.
Valid types: ``popen``, ``ssh=hostname``, ``socket=host:port``.
Valid configuration::
id=<string> specifies the gateway id
python=<path> specifies which python interpreter to execute
execmodel=model 'thread', 'eventlet', 'gevent' model for execution
chdir=<path> specifies to which directory to change
nice=<path> specifies process priority of new process
env:NAME=value specifies a remote environment variable setting.
If no spec is given, self.defaultspec is used.
"""
if not spec:
spec = self.defaultspec
if not isinstance(spec, XSpec):
spec = XSpec(spec)
self.allocate_id(spec)
if spec.execmodel is None:
spec.execmodel = self.remote_execmodel.backend
if spec.via:
assert not spec.socket
master = self[spec.via]
proxy_channel = master.remote_exec(gateway_io)
proxy_channel.send(vars(spec))
proxy_io_master = gateway_io.Pr | oxyIO(proxy_channel, self.execmodel)
gw = gateway_bootstrap.bootstrap(proxy_io_master, spec)
elif spec.popen or spec.ssh:
io = gateway_io.create_io(spec, execmodel=self.execmodel)
gw = gateway_bootstrap.bootstrap(io, spec)
elif spec.socket:
from execnet import gateway_socket
io = gateway_socket.create_io(spec, self, execmodel=self.execmodel)
gw = gateway_bootstrap.bootstrap(io, spec) |
else:
raise ValueError("no gateway type found for %r" % (spec._spec,))
gw.spec = spec
self._register(gw)
if spec.chdir or spec.nice or spec.env:
channel = gw.remote_exec("""
import os
path, nice, env = channel.receive()
if path:
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
if nice and hasattr(os, 'nice'):
os.nice(nice)
if env:
for name, value in env.items():
os.environ[name] = value
""")
nice = spec.nice and int(spec.nice) or 0
channel.send((spec.chdir, nice, spec.env))
channel.waitclose()
return gw
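    # Illustrative spec strings composed from the options documented above:
    # 'popen//id=local1', 'ssh=host//chdir=/tmp//nice=10',
    # 'socket=192.168.0.2:8888', 'popen//via=gw0'.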
def allocate_id(self, spec):
""" (re-entrant) allocate id for the given xspec object. """
if spec.id is None:
with self._autoidlock:
id = "gw" + str(self._autoidcounter)
self._autoidcounter += 1
if id in self:
raise ValueError("already have gateway with id %r" %(id,))
spec.id = id
def _register(self, gateway):
assert not hasattr(gateway, '_group')
assert gateway.id
        assert gateway.id not in self
self._gateways.append(gateway)
gateway._group = self
def _unregister(self, gateway):
self._gateways.remove(gateway)
self._gateways_to_join.append(gateway)
def _cleanup_atexit(self):
trace("=== atexit cleanup %r ===" %(self,))
self.terminate(timeout=1.0)
def terminate(self, timeout=None):
""" trigger exit of member gateways and wait for termination
of member gateways and associated subprocesses. After waiting
timeout seconds try to to kill local sub processes of popen-
and ssh-gateways. Timeout defaults to None meaning
open-ended waiting and no kill attempts.
"""
while self:
vias = {}
for gw in self:
if gw.spec.via:
vias[gw.spec.via] = True
for gw in self:
if gw.id not in vias:
gw.exit()
def join_wait(gw):
gw.join()
gw._io.wait()
def kill(gw):
trace("Gateways did not come down after timeout: %r" % gw)
gw._io.kill()
safe_terminate(self.execmodel, timeout, [
(lambda: join_wait(gw), lambda: kill(gw))
for gw in self._gateways_to_join])
self._gateways_to_join[:] = []
def remote_exec(self, source, **kwargs):
""" remote_exec source on all member gateways and return
MultiChannel connecting to all sub processes.
"""
channels = []
for gw in self:
channels.append(gw.remote_exec(source, **kwargs))
return MultiChannel(channels)
class MultiChannel:
def __init__(self, channels):
self._channels = channels
def __len__(self):
return len(self._channels)
def __iter__(self):
return iter(self._channels)
def __getitem__(self, key):
return self._channels[key]
def __contains__(self, chan):
return chan in self._channels
def send_each(self, item):
for ch in self._channels:
ch.send(item)
def receive_ea |
sbergot/invok | invok/DependencyNode.py | Python | bsd-3-clause | 552 | 0.001812 | import inspect
import functools
class DependencyNode:
def __init_ | _(self, cls, cached):
self.cls = cls
self.deps = self.get_deps(cls)
self.cached = cached
def get_deps(self, cls):
try:
return inspect.getargspec(cls.__init__).args[1:]
except AttributeError:
# no __init__ --> no dep
return []
def config(self, **kwargs):
for name in kwargs:
self.deps.remo | ve(name)
self.cls = functools.partial(self.cls, **kwargs)
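# Usage sketch (illustrative; Service is a hypothetical class whose __init__
# takes 'db' and 'cache' arguments):
#   node = DependencyNode(Service, cached=True)
#   node.deps                 # -> ['db', 'cache']
#   node.config(db=my_db)     # pre-binds 'db' and drops it from node.deps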
|
LAIRLAB/libpyarr | find_epd.py | Python | bsd-3-clause | 2,436 | 0.007389 | #! /usr/bin/env python
'''
Recursively looks for EPD
Writes the found library to STDOUT and the found include directory to STDERR. This way,
the script can be executed from CMake and the Python library and include paths can be read from those streams.
Checks for a minimum Python version, 2.7 by default, which can also be given as a command-line argument.
The root paths it starts looking for EPD in are contained in the 'check_dirs' global variable
'''
import os, sys, getpass
check_dirs = ['/opt/local/lib', '/home/%s' % getpass.getuser(), '/usr/share/', '/home/ecuzzill', '/opt/local', '/opt']
def main():
if len(sys.argv) == 2:
min_py_version = sys.argv[1]
#make sure it's a version by casting to float
try:
min_py_version = 'libpython' + str(float(min_py_version))
except ValueError:
min_py_version = 'libpython2.7'
else:
min_py_version = 'libpython2.7'
found = False
for d in check_dirs:
for (dname, dnames, fnames) in os.walk(d):
for r in dnames:
#found an 'epd'-ish directory
if r.find('epd') >= 0:
full_dir = '%s/%s' % (dname, r)
lib_exists = False
#find the library
for lib in [x for x in os.listdir(full_dir + '/lib/') if x.find('libpython') >=0 ]:
| lib_version = '.'.join(lib.split('.')[:2])
if lib_version >= min_py_version:
lib_exists = True
break
if not lib_exists:
break
lib = '%s/lib/%s.so' % (full_dir, lib_version)
| include_dir = '%s/include/%s' % (full_dir, min_py_version[3:])
#success if this passes
if os.path.isfile(lib) and os.path.isdir(include_dir):
bin_path = '%s/bin/python' % (full_dir)
unicode_support = os.system("%s -c 'import sys; sys.exit(sys.maxunicode > 65535)'" % (bin_path))
if unicode_support == 0:
sys.stdout.write(lib)
sys.stderr.write(include_dir)
found = True
break
if found:
break
if __name__ == '__main__':
main()
|
sniemi/SamPy | sandbox/src1/examples/dash_control.py | Python | bsd-2-clause | 264 | 0.015152 | #!/usr/bin/env python
| """
You can precisely specify dashes with an on/off ink rect sequence in
points.
"""
from pylab import *
dashes = [5, 2, 10, 5]  # 5 points on, 2 off, 10 on, 5 off
l, = plot(arange(20), '--')
l. | set_dashes(dashes)
savefig('dash_control')
show()
|
desaster/uusipuu | modules/memo.py | Python | bsd-2-clause | 3,945 | 0.001267 | # -*- coding: ISO-8859-15 -*-
from core.Uusipuu import UusipuuModule
import random, time
class Module(UusipuuModule):
def startup(self):
if 'memo' not in self.config:
self.config['memo'] = {}
def privmsg(self, user, target, msg):
if target != self.channel:
return
pieces = msg.strip().split(' ', 1)
if len(pieces) != 2:
return
cmd = pieces[0].strip()
params = pieces[1].strip()
if cmd == '??':
self.meta_show(user, params)
elif cmd == '?!':
self.meta_searchkey(user, params.strip())
elif cmd == '?#':
self.meta_searchvalue(user, params.strip())
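    # Illustrative channel commands implied by the parsing above:
    #   '?? key'   -> show the memo stored under "key"
    #   '?! text'  -> show a random memo whose key contains "text"
    #   '?# text'  -> show a random memo whose value contains "text"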
def cmd_memo(self, user, target, params):
pieces = params.strip().split(' ', 1)
if len(pieces) != 2:
self.chanmsg('Insufficient parameters')
return
cmd = pieces[0].strip()
params = pieces[1].strip()
if cmd == 'add':
self.meta_addmemo(user, params)
elif cmd in ['del', 'delete', 'remove']:
self.meta_delmemo(user, params)
elif cmd == 'show':
self.meta_show(user, params)
elif cmd == 'info':
self.meta_info(user, params)
elif cmd in ['search', 'searchkey', 'sk']:
self.meta_searchkey(user, params.strip())
elif cmd in ['searchvalue', 'sv']:
self.meta_searchvalue(user, params.strip())
def meta_show(self, user, key):
self.do_show(user, key)
def meta_info(self, user, key):
self.do_show(user, key)
self.do_info(user, key)
def meta_searchkey(self, user, key):
nick = user.split('!', 1)[0]
keys = [x for x in self.config['memo'] if x.count(key)]
if not keys:
self.chanmsg('No keys found matching "%s"' % (key))
return
self.do_show(user, random.choice(keys))
def meta_searchvalue(self, user, value):
nick = user.split('!', 1)[0]
keys = [x for x in self.config['memo'] \
if self.config['memo'][x]['value'].count(value)]
if not keys:
self.chanmsg('No values found matching "%s"' % (value))
return
self.do_show(user, random.choice(keys))
def do_show(self, user, key):
nick = user.split('!', 1)[0]
if key not in self.config['memo']:
self.chanmsg('Entry not found (%s)' % key)
            return
self.chanmsg('%s: %s' % (key, str(self.config['memo'][key]['value'])))
def do_info(self, user, key):
if key not in self.config['memo']:
return
self.chanmsg('%s created by %s [%s]' % (key,
self.config['memo'][key]['user'],
time.ctime(self.config['memo'][key]['add | ed'])))
def meta_addmemo(self, user, params):
nick = user.split('!', 1)[0]
pieces = params.strip().split(' ', 1)
| if len(pieces) < 2:
self.chanmsg('Insufficient parameters')
return
key, value = pieces[0].strip(), pieces[1].strip()
if key in self.config['memo']:
self.chanmsg('%s: An entry by that name already exists' % nick)
return
self.config['memo'][key] = {
'value': value,
'user': user,
'added': int(time.time()),
}
self.save()
self.chanmsg('Memo entry "%s" successfully added' % (str(key)))
def meta_delmemo(self, user, params):
nick = user.split('!', 1)[0]
pieces = params.strip().split(' ', 1)
key = pieces[0].strip()
if key not in self.config['memo']:
self.chanmsg('Entry not found (%s)' % key)
return
del self.config['memo'][key]
self.save()
self.chanmsg('Memo entry "%s" successfully removed' % (key))
# vim: set et sw=4:
|
richardbeare/SimpleITK | Examples/SliceBySliceDecorator/SliceBySliceDecorator.py | Python | apache-2.0 | 2,991 | 0.002675 | #!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain | a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================== | =======
from __future__ import print_function
import SimpleITK as sitk
import sys
import itertools
from functools import wraps
def slice_by_slice_decorator(func):
"""
A function decorator which executes func on each 3D sub-volume and *in-place* pastes the results into the
input image. The input image type and the output image type are required to be the same type.
:param func: A function which take a SimpleITK Image as it's first argument and returns an Image as results.
:return: A decorated function.
"""
iter_dim = 2
@wraps(func)
def slice_by_slice(image, *args, **kwargs):
dim = image.GetDimension()
if dim <= iter_dim:
            # Image dimension is already at or below the slice dimension; apply func directly
image = func(image, *args, **kwargs)
return image
extract_size = list(image.GetSize())
extract_size[iter_dim:] = itertools.repeat(0, dim-iter_dim)
extract_index = [0] * dim
paste_idx = [slice(None, None)] * dim
extractor = sitk.ExtractImageFilter()
extractor.SetSize(extract_size)
for high_idx in itertools.product(*[range(s) for s in image.GetSize()[iter_dim:]]):
# The lower 2 elements of extract_index are always 0.
# The remaining indices are iterated through all indexes.
extract_index[iter_dim:] = high_idx
extractor.SetIndex(extract_index)
# Sliced based indexing for setting image values internally uses the PasteImageFilter executed "inplace".
# The lower 2 elements are equivalent to ":". For a less general case the assignment could be written
# as image[:,:,z] = ...
paste_idx[iter_dim:] = high_idx
image[paste_idx] = func(extractor.Execute(image), *args, **kwargs)
return image
return slice_by_slice
if len(sys.argv) < 3:
print("Usage: SubDimensionProcess inputImage outputImage", file=sys.stderr)
sys.exit(1)
inputImage = sitk.ReadImage(sys.argv[1])
# Decorate the function
adaptive_histogram_equalization_2d = slice_by_slice_decorator(sitk.AdaptiveHistogramEqualization)
adaptive_histogram_equalization_2d(inputImage, radius=[20]*2, alpha=0.3, beta=0.3)
sitk.WriteImage(inputImage, sys.argv[2])
|
gmatteo/pymatgen | pymatgen/io/tests/test_zeopp.py | Python | mit | 11,239 | 0.000623 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "bkmedasani@lbl.gov"
__date__ = "Aug 2, 2013"
import os
import re
import unittest
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.periodic_table import Species
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.zeopp import (
ZeoCssr,
ZeoVoronoiXYZ,
get_free_sphere_params,
get_high_accuracy_voronoi_nodes,
get_void_volume_surfarea,
get_voronoi_nodes,
)
from pymatgen.util.testing import PymatgenTest
try:
import zeo
except ImportError:
zeo = None
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.zeocssr = ZeoCssr(p.structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "EDI.cssr")
zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
# @unittest.skipIf(not zeo, "zeo not present.")
class ZeoCssrOxiTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
structure = BVAnalyzer().get_oxi_state_decorated_structure(p.structure)
self.zeocssr = ZeoCssr(structure)
def test_str(self):
expected_string = """4.7595 10.4118 6.0672
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe3+ 0.4749 0.2187 0.7500 0 0 0 0 0 0 0 0 0.0000
2 Fe3+ 0.9749 0.2813 0.2500 0 0 0 0 0 0 0 0 0.0000
3 Fe3+ 0.0251 0.7187 0.7500 0 0 0 0 0 0 0 0 0.0000
4 Fe3+ 0.5251 0.7813 0.2500 0 0 0 0 0 0 0 0 0.0000
5 P5+ 0.4182 0.0946 0.2500 0 0 0 0 0 0 0 0 0.0000
6 P5+ 0.9182 0.4054 0.7500 0 0 0 0 0 0 0 0 0.0000
7 P5+ 0.0818 0.5946 0.2500 0 0 0 0 0 0 0 0 0.0000
8 P5+ 0.5818 0.9054 0.7500 0 0 0 0 0 0 0 0 0.0000
9 O2- 0.7071 0.0434 0.7500 0 0 0 0 0 0 0 0 0.0000
10 O2- 0.7413 0.0966 0.2500 0 0 0 0 0 0 0 0 0.0000
11 O2- 0.2854 0.1657 0.0461 0 0 0 0 0 0 0 0 0.0000
12 O2- 0.2854 0.1657 0.4539 0 0 0 0 0 0 0 0 0.0000
13 O2- 0.7854 0.3343 0.5461 0 0 0 0 0 0 0 0 0.0000
14 O2- 0.7854 0.3343 0.9539 0 0 0 0 0 0 0 0 0.0000
15 O2- 0.2413 0.4034 0.7500 0 0 0 0 0 0 0 0 0.0000
16 O2- 0.2071 0.4566 0.2500 0 0 0 0 0 0 0 0 0.0000
17 O2- 0.7929 0.5434 0.7500 0 0 0 0 0 0 0 0 0.0000
18 O2- 0.7587 0.5966 0.2500 0 0 0 0 0 0 0 0 0.0000
19 O2- 0.2146 0.6657 0.0461 0 0 0 0 0 0 0 0 0.0000
20 O2- 0.2146 0.6657 0.4539 0 0 0 0 0 0 0 0 0.0000
21 O2- 0.7146 0.8343 0.5461 0 0 0 0 0 0 0 0 0.0000
22 O2- 0.7146 0.8343 0.9539 0 0 0 0 0 0 0 0 0.0000
23 O2- 0.2587 0.9034 0.7500 0 0 0 0 0 0 0 0 0.0000
24 O2- 0.2929 0.9566 0.2500 0 0 0 0 0 0 0 0 0.0000"""
self.assertEqual(str(self.zeocssr), expected_string)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "EDI_oxistate_decorated.cssr")
| zeocssr = ZeoCssr.from_file(filename)
self.assertIsInstance(zeocssr.structure, Structure)
@unittest.skipIf(not zeo, "zeo not present.")
class ZeoVoronoiXYZTest(unittest.TestCase):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
prop = [0.4, 0.2, 0.2, 0.2, 0. | 2]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords, site_properties={"voronoi_radius": prop})
self.xyz = ZeoVoronoiXYZ(self.mol)
def test_str(self):
ans = """5
H4 C1
C 0.000000 0.000000 0.000000 0.400000
H 1.089000 0.000000 0.000000 0.200000
H -0.363000 1.026719 0.000000 0.200000
H -0.363000 -0.513360 -0.889165 0.200000
H -0.363000 -0.513360 0.889165 0.200000"""
self.assertEqual(str(self.xyz), ans)
self.assertEqual(str(self.xyz), ans)
def test_from_file(self):
filename = os.path.join(PymatgenTest.TEST_FILES_DIR, "EDI_voro.xyz")
vor = ZeoVoronoiXYZ.from_file(filename)
self.assertIsInstance(vor.molecule, Molecule)
@unittest.skipIf(not zeo, "zeo not present.")
class GetVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
valences = bv.get_valences(self.structure)
el = [site.species_string for site in self.structure.sites]
valence_dict = dict(zip(el, valences))
self.rad_dict = {}
for k, v in valence_dict.items():
self.rad_dict[k] = float(Species(k, v).ionic_radius)
assert len(self.rad_dict) == len(self.structure.composition)
def test_get_voronoi_nodes(self):
(
vor_node_struct,
vor_edge_center_struct,
vor_face_center_struct,
) = get_voronoi_nodes(self.structure, self.rad_dict)
self.assertIsInstance(vor_node_struct, Structure)
self.assertIsInstance(vor_edge_center_struct, Structure)
self.assertIsInstance(vor_face_center_struct, Structure)
print(len(vor_node_struct.sites))
print(len(vor_face_center_struct.sites))
@unittest.skip("file free_sph.cif not present")
class GetFreeSphereParamsTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "free_sph.cif")
self.structure = Structure.from_file(filepath)
self.rad_dict = {
"Ge": 0.67,
"P": 0.52,
"S": 1.7,
"La": 1.17,
"Zr": 0.86,
"O": 1.26,
}
def test_get_free_sphere_params(self):
free_sph_params = get_free_sphere_params(self.structure, rad_dict=self.rad_dict)
# Zeo results can change in future. Hence loose comparison
self.assertAlmostEqual(free_sph_params["inc_sph_max_dia"], 2.58251, places=1)
self.assertAlmostEqual(free_sph_params["free_sph_max_dia"], 1.29452, places=1)
self.assertAlmostEqual(free_sph_params["inc_sph_along_free_sph_path_max_dia"], 2.58251, places=1)
@unittest.skipIf(not zeo, "zeo not present.")
class GetHighAccuracyVoronoiNodesTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
p = Poscar.from_file(filepath)
self.structure = p.structure
bv = BVAnalyzer()
|
zinderud/ysa | sklearn/3.py | Python | apache-2.0 | 655 | 0.044615 | import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
num_arsa=120
np.random.seed(40)
x = np.random.randint(low=200,high=2000,size=num_arsa)
np.random.seed(40)
y = x*100.0+np.random.randint(low=10000,high=100000,size=num_arsa)
print(x)
print(y)
plt.scatter(x,y)
m,b = np.polyfit(x,y,1) # np.polyfit(x axis, y axis, degree of the polynomial fit)
a = np.arange(2299)
plt.scatter(x,y) # We draw the points with a scatter plot.
plt.plot(m*a+b) #
z = 32
tahmin = m*z+b
print(tahmin)
plt.scatter(z,tahmin,c="red",marker=">")
plt.show()
print("y=",m,"x+",b) |
reclosedev/requests-cache | examples/generate_test_db.py | Python | bsd-2-clause | 4,628 | 0.002377 | #!/usr/bin/env python
"""An example of generating a test database with a large number of semi-randomized responses.
This is useful, for example, for reproducing issues that only occur with large caches.
"""
import logging
from datetime import datetime, timedelta
from os import urandom
from os.path import getsize
from random import random
from time import perf_counter as time
import requests
from rich.progress import Progress
from requests_cache import ALL_METHODS, CachedResponse, CachedSession
from requests_cache.models.response import format_file_size
from tests.conftest import HTTPBIN_FORMATS, HTTPBIN_METHODS
# TODO: If others would find it useful, these settings could be turned into CLI args
BACKEND = 'sqlite'
CACHE_NAME = 'rubbish_bin'
BASE_RESPONSE = requests.get('https://httpbin.org/get')
HTTPBIN_EXTRA_ENDPOINTS = [
'anything',
'bytes/1024',
'cookies',
'ip',
'redirect/5',
'stream-bytes/1024',
]
MAX_EXPIRE_AFTER = 30 # In seconds; set to -1 to disable expiration
MAX_RESPONSE_SIZE = 10000 # In bytes
N_RESPONSES = 100000
N_INVALID_RESPONSES = 10
logging.basicConfig(level='INFO')
logger = logging.getLogger('requests_cache')
class InvalidResponse(CachedResponse):
"""Response that will raise an exception when deserialized"""
def __setstate__(self, d):
raise ValueError
def populate_cache(progress, task):
session = CachedSession(CACHE_NAME, backend=BACKEND, allowable_methods=ALL_METHODS)
n_previous_responses = len(session.cache.responses)
# Cache a variety of different response formats, which may result in different behavior
urls = [
('GET', f'https://httpbin.org/{endpoint}')
for endpoint in HTTPBIN_FORMATS + HTTPBIN_EXTRA_ENDPOINTS
]
urls += [(method, f'https://httpbin.org/{method.lower()}') for method in HTTPBIN_METHODS]
for method, url in urls:
session.request(method, url)
progress.update(task, advance=1)
# Cache a large number of responses with randomized response content, which will expire at random times
with session.cache.responses.bulk_commit():
for i in range(N_RESPONSES):
new_response = get_randomized_response(i + n_previous_responses)
if MAX_EXPIRE_AFTER >= 0:
expires = datetime.now() + timedelta(seconds=random() * MAX_EXPIRE_AFTER)
else:
expires = None
session.cache.save_response(new_response, expires=expires)
progress.update(task, advance=1)
# Add some invalid responses
with session.cache.responses.bulk_commit():
for i in range(N_INVALID_RESPONSES):
new_response = InvalidResponse.from_response(BASE_RESPONSE)
new_response.request.url += f'/invalid_response_{i}'
key = session.cache.create_key(new_response.request)
session.cache.responses[key] = new_response
progress.update(task, advance=1)
def get_randomized_response(i=0):
"""Get a response with randomized content"""
new_response = CachedResponse.from_response(BASE_RESPONSE)
n_bytes = int(random() * MAX_RESPONSE_SIZE)
new_response._content = urandom(n_bytes)
new_response.request.url += f'/response_{i}'
return new_response
def remove_expired_responses(expire_after=None):
logger.setLevel('DEBUG')
session = CachedSession(CACHE_NAME)
total_responses = len(session.cache.responses)
start = time()
session.remove_expired_responses(expire_after=expire_after)
elapsed = time() - start
n_removed = total_responses - len(session.cache.responses)
logger.info(
f'Removed {n_removed} expired/invalid responses in {elapsed:.2f} seconds '
f'(avg {(elapsed / n_removed) * 1000:.2f}ms per response)'
)
def main():
total_responses = len(HTTPBIN_FORMATS + HTTPBIN_EXTRA_ENDPOINTS + HTTPBIN_METHODS)
total_responses += N_RESPONSES + N_INVALID_RESPONSES
with Progress() as progress:
task = progress.add_task('[cyan]Generating responses...', total=total_responses)
populate_cache(progress, task)
actual_total_responses = len(CachedSession(CACHE_NAME).cache.responses)
logger.info(f'Generated cache with {actual_total_responses} responses')
if BACKEND == 'sqlite':
cache_file_size = format_file_size(getsize(f'{CACHE_NAME}.sqlite'))
logger.info(f'Total cache size: {cache_file_size}')
if __name__ == '__main__':
main()
# Remove some responses (with randomized expiration)
# remove_expired_responses()
# Expire and remove all responses
# remove_expired_responses(expire_after=1)
|
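# A hedged sketch of the TODO near the top of this example: exposing the
# module-level settings as CLI args. The flag names below are assumptions, not
# an established interface; main() would then read these instead of constants.
import argparse

def parse_cli_args():
    parser = argparse.ArgumentParser(description='Generate a test cache database')
    parser.add_argument('--backend', default='sqlite')
    parser.add_argument('--cache-name', default='rubbish_bin')
    parser.add_argument('--n-responses', type=int, default=100000)
    parser.add_argument('--max-expire-after', type=int, default=30)
    return parser.parse_args()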
tiagormk/gem5-hmp | configs/common/Caches.py | Python | bsd-3-clause | 3,250 | 0.001231 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
from m5.objects import *
# Base implementations of L1, L2, IO and TLB-walker caches. These are
# used in the regressions and also as base components in the
# system-configuration scripts. The values are meant to serve as a
# starting point, and specific parameters can be overridden in the
# specific instantiations.
class L1Cache(BaseCache):
assoc = 2
hit_latency = 2
response_latency = 2
block_size = 64
mshrs = 4
tgts_per_mshr = 20
is_top_level = True
class L2Cache(BaseCache):
assoc = 8
block_size = 64
hit_latency = 20
response_latency = 20
mshrs = 20
tgts_per_mshr = 12
write_buffers = 8
class IOCache(BaseCache):
assoc = 8
block_size = 64
hit_latency = 50
response_latency = 50
mshrs = 20
size = '1kB'
tgts_per_mshr = 12
forward_snoops = False
is_top_level = True
class PageTableWalkerCache(BaseCache):
assoc = 2
block_size = 64
hit_latency = 2
response_latency = 2
mshrs = 10
size = '1kB'
tgts_per_mshr = 12
is_top_level = True
|
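# A hedged illustration (not part of the original file) of the "override in
# specific instantiations" pattern the comment block above describes: derive
# from a base cache and change only the parameters you need.
class BigL1Cache(L1Cache):
    assoc = 4   # more associative than the 2-way base
    mshrs = 8   # allow more outstanding misses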
ucb-sejits/ctree | test/test_lambda.py | Python | bsd-2-clause | 2,548 | 0.009812 | import unittest
import ctypes as ct
import ast
import sys
from ctree.transformations import PyBasicConversions
from ctree.transforms import DeclarationFiller
from ctree.c.nodes import *
class TestAssigns(unittest.TestCase):
def mini_transform(self, node):
"""
This method acts as a simulation of a specializer's transform( | ) method. It's the bare minimum required of
a transform() method by the specializer writer.
:param node: the node to transform
:return: the node transformed through PyBasicConversions into a rough C-AST.
"""
transformed_node = PyBasicConversions().visit(node)
transformed_node.name = "apply"
transformed_node.return_type = ct.c_float()
for param in transformed_node.params:
param.type = ct.c_float()
return transformed_node
def mini__call__(self, node):
"""
This method acts as a simulation of jit.py's __call__() method. The specializer writer does not have to write
this method.
:param node: the node to generate code for
:return: a type-complete C-AST corresponding to the input node
"""
transformed_node = self.mini_transform(node)
return DeclarationFiller().visit(transformed_node)
@unittest.skipIf(sys.version_info >= (3,0), 'Lambdas changed in py3k')
def test_one_arg_lambda(self):
"""
This method tests the squaring lambda function, a one argument lambda function.
"""
square_lambda_node = ast.Lambda(args = ast.arguments([SymbolRef("x")], None, None, None), body = Mul(SymbolRef("x"), SymbolRef("x")))
# simulating __call__()
type_inferred_node = self.mini__call__(square_lambda_node)
self.assertEqual(str(type_inferred_node), "float apply(float x) {\n" + \
" return x * x;\n}")
@unittest.skipIf(sys.version_info >= (3,0), 'Lambdas changed in py3k')
def test_two_arg_lambda(self):
"""
This method tests the adding lambda function, a two argument lambda function.
"""
add_lambda_node = ast.Lambda(args = ast.arguments([SymbolRef("x"), SymbolRef("y")], None, None, None), body = Add(SymbolRef("x"), SymbolRef("y")))
# simulating __call__()
type_inferred_node = self.mini__call__(add_lambda_node)
self.assertEqual(str(type_inferred_node), "float apply(float x, float y) {\n" + \
" return x + y;\n}")
|
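# The tests above are skipped on Python 3 because the ast.arguments node grew
# new fields there. A hedged, version-agnostic alternative (not used by the
# original tests) is to parse source text instead of constructing nodes by hand:
import ast

tree = ast.parse("lambda x: x * x", mode="eval")
square_lambda_node = tree.body  # an ast.Lambda node on both Python 2 and 3
assert isinstance(square_lambda_node, ast.Lambda)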
appcelerator/titanium_mobile_tooling | templates/plugin/build.py | Python | apache-2.0 | 2,990 | 0.049498 | #!/usr/bin/env python
#
# Appcelerator Titanium Plugin Packager
#
#
import os, sys, glob, string
import zipfile
cwd = os.path.dirname(__file__)
required_plugin_keys = ['version','pluginid','description','copyright','license','minsdk']
plugin_defaults = {
'description':'My plugin',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) 2010 by Your Company',
}
plugin_license_default = "TODO: place your license here and we'll include it in the plugin distribution"
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open('LICENSE').read()
if c.find(plugin_license_default) != -1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
if not os.path.exists(path): die("missing %s" % path)
f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_plugin_keys:
if not manifest.has_key(key): die("missing required manifest key '%s' | " % key)
if plugin_defaults.has_key(key):
defvalue = plugin_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','README','build.py']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e)==2 and e[1]=='.pyc':continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def package_plugin(manifest,mf):
pluginid = manifest['pluginid'].lower()
version = manifest['version']
pluginzip = '%s-%s.zip' % (pluginid,version)
if os.path.exists(pluginzip): os.remove(pluginzip)
zf = zipfile.ZipFile(pluginzip, 'w', zipfile.ZIP_DEFLATED)
pluginpath = 'plugins/%s/%s' % (pluginid,version)
for d in os.listdir(cwd):
if os.path.isdir(d):
if d in ignoreDirs: continue
zip_dir(zf,d,'%s/%s' % (pluginpath,d))
else:
if d in ignoreFiles: continue
if d.endswith('.zip'): continue
zf.write(d,'%s/%s' % (pluginpath,d))
zf.close()
print "Plugin packaged at %s" % pluginzip
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
package_plugin(manifest,mf)
sys.exit(0)
|
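# A small usage sketch (not in the original script) for replace_vars() above:
# each $(key) token is replaced from the config dict; an unknown key stops the
# substitution loop and the remaining text is returned untouched.
demo_config = {'pluginid': 'com.example.demo', 'version': '1.0'}
print replace_vars(demo_config, 'plugins/$(pluginid)/$(version)')
# -> plugins/com.example.demo/1.0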
jehama/OSINThadoop | FieldModifiers/edit_heartbleed_timestamp.py | Python | mit | 4,926 | 0.005075 | # --------------------------------------------------------------------------------------------
# Copyright (c) jehama. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#!/usr/bin/env python3
# pylint: disable=C0301
"""Creates a universal timestamp field for hearthbleed results,
so Kibana can create a time based index of it."""
# Imports
import datetime
import json
import os
# import sys
from pathlib import Path
# A simple message to notify the user about the start of the script.
print("---Hearthbleed timestamp editor---")
# Loop through a list of files in the directory
for filename in os.listdir("./"):
# Check if the file is a JSON file and it contains heartbleed in the name.
if filename.endswith(".json") and "heartbleed" in filename:
# Before editing print the name of the file that will be edited.
print("Editing {0}...".format(filename))
# Make preparations with regard to the filenames, counters and the desired date format.
input_file = Path(filename)
output_file = Path(filename[:-5] + "-edited.json")
total_count = 0
missing_count = 0
new_date_format = "%Y-%m-%d %H:%M:%S:%f"
# Select the file to be used or the output.
with output_file.open("w") as outfile:
# Loop through every JSON element in the input file.
for element in input_file.open("r"):
# Update a counter, reset the date field and load in the next JSON element.
total_count += 1
date = None
json_element = json.loads(element)
# If the loaded element does not contain the right json_element,
# or if the json_element contains a different date format then a KeyError is thrown.
try:
# Create a new json_element with the same value as the one already known.
json_element["custom.heartbleed.timestamp"] = json_element["source2.443"]["https"]["heartbleed"]["timestamp"]
old_date_format = "%Y-%m-%d %H:%M:%S UTC"
# To convert the format of the timestamp the timestamp is converted to a datetime object.
# Then the datetime object is converted to a string following the desired format.
date = datetime.datetime.strptime(json_element["custom.heartbleed.timestamp"], old_date_format)
new_date = datetime.datetime.strftime(date, new_date_format)
# The value of the new json_element is then updated with the timestamp string.
json_element["custom.heartbleed.timestamp"] = new_date
except KeyError:
try:
# This json_element can contain two different timestamp formats.
# So a second try is used to check for the second format if the first one is incorrect.
json_element["custom.heartbleed.timesta | mp"] = json_element["source1.timestamp"]
old_date_format = "%Y-%m-%dT%H:%M:%S.%f"
try:
date = datetime.datetime.strptime(json_element["custom.heartbleed.timestamp"], old_date_format)
except ValueError:
date = datetime.datetime.strptime(json_element["custom.heartbleed.timestamp"], "%Y-%m-%dT%H:%M:%S")
new_date = datetime.datetime.strftime(date, new_date_format)
json_element["custom.heartbleed.timestamp"] = new_date
except KeyError:
# If these statements are reached then that means that either the timestamp formats don't match,
# or that there is no corresponding timestamp field in the element.
missing_count += 1
continue
# Instead of simply noting the failed creation of the new json_element one can also choose stop the script.
# This is done by uncommenting the next line while commenting two instructions above.
# sys.exit("Field to edit is missing or it does not conform to the given timestamp formats." + "\n Exiting now...")
# Write updated element to the new file.
outfile.write(json.dumps(json_element) + "\n")
# A simple way of visualising the progress; this can be uncommented if desired.
# print("\r" + str(total_count) + " done..", end="")
print("Total edited: {0}".format(total_count))
print("Total missed: {0}".format(missing_count))
print("Edited results stored in: {0}".format(output_file))
|
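# A hedged refactoring sketch (not in the original script): the nested
# try/except blocks above amount to "try several strptime formats in order",
# which could be factored into a single helper like this.
import datetime

def parse_any(timestamp, formats=("%Y-%m-%d %H:%M:%S UTC",
                                  "%Y-%m-%dT%H:%M:%S.%f",
                                  "%Y-%m-%dT%H:%M:%S")):
    for fmt in formats:
        try:
            return datetime.datetime.strptime(timestamp, fmt)
        except ValueError:
            continue
    return None  # caller counts this as a missing/unparseable timestamp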
pythonprobr/pythonpro-website | pythonpro/memberkit/migrations/0002_create_relationship_with_payment_config_item.py | Python | agpl-3.0 | 1,292 | 0.004644 | # Generated by Django 3.2.4 on 2021-06-05 15:46
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_pagarme', '0004_pagarme_item_config_available_until'),
('memberkit', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='subscriptiontype',
options={'verbose_name': 'Tipo de Assinatura', 'verbose_name_plural': 'Tipos de Assinaturas'},
),
migrations.CreateModel(
name='PaymentItemConfigToSubscriptionType',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('payment_item', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,
related_name='subscription_type_relation',
to='django_pagarme.pagarmeitemconfig')),
('subscription_type',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='payment_items_relation',
to='memberkit.subscriptiontype')),
],
),
]
|
cloud-ark/cloudark | server/gcloud_handler.py | Python | apache-2.0 | 4,809 | 0.001248 | import ast
from os.path import expanduser
from stevedore import extension
from common import common_functions
from common import fm_logger
from dbmodule.objects import app as app_db
from dbmodule.objects import environment as env_db
from server.server_plugins.gcloud import gcloud_helper
home_dir = expanduser("~")
APP_AND_ENV_STORE_PATH = ("{home_dir}/.cld/data/deployments/").format(home_dir=home_dir)
fmlogger = fm_logger.Logging()
class GCloudHandler(object):
res_mgr = extension.ExtensionManager(
namespace='server.server_plugins.gcloud.resource',
invoke_on_load=True,
)
coe_mgr = extension.ExtensionManager(
namespace='server.server_plugins.gcloud.coe',
invoke_on_load=True,
)
app_mgr = extension.ExtensionManager(
namespace='server.server_plugins.gcloud.app',
invoke_on_load=True,
)
gcloudhelper = gcloud_helper.GCloudHelper()
def create_resources(self, env_id, resource_list):
fmlogger.debug("GCloudHandler create_resources")
resource_details = ''
ret_status_list = []
for resource_defs in resource_list:
resource_details = resource_defs['resource']
type = resource_details['type']
env_db.Environment().update(env_id, {'status': 'creating_' + type})
for name, ext in GCloudHandler.res_mgr.items():
if name == type:
status = ext.obj.create(env_id, resource_details)
if status: ret_status_list.append(status)
return ret_status_list
def delete_resource(self, env_id, resource):
fmlogger.debug("GCloudHandler delete_resource")
type = resource.type
env_db.Environment().update(env_id, {'status': 'deleting_' + type})
for name, ext in GCloudHandler.res_mgr.items():
if name == type:
ext.obj.delete(resource)
def run_command(self, env_id, env_name, resource, command_string):
fmlogger.debug("GCloudHandler run_command")
type = resource.type
command_type = GCloudHandler.gcloudhelper.resource_type_for_command(command_string)
command_output_all = []
for name, ext in GCloudHandler.res_mgr.items():
if name == type:
if name == command_type or command_string == 'help':
command_output = ext.obj.run_command(env_id, env_name, resource, command_string)
command_output_all.extend(command_output)
coe_type = common_functions.get_coe_type(env_id)
for name, ext in GCloudHandler.coe_mgr.items():
if name == coe_type:
if name == command_type or command_string == 'help':
command_output = ext.obj.run_command(env_id, env_name, resource, command_string)
command_output_all.extend(command_output)
return command_output_all
def create_cluster(self, env_id, env_info):
coe_type = common_functions.get_coe_type(env_id)
for name, ext in GCloudHandler.coe_mgr.items():
if name == coe_type:
status = ext.obj.create_cluster(env_id, env_info)
return status
def delete_cluster(self, env_id, env_info, resource):
coe_type = common_functions.get_coe_type(env_id)
for name, ext in GCloudHandler.coe_mgr.items():
if name == coe_type:
ext.obj.delete_cluster(env_id, env_info, resource)
def create_container(self, cont_name, cont_info):
repo_type = cont_info['dep_target']
for name, ext in GCloudHandler.res_mgr.items():
if name == repo_type:
ext.obj.create(cont_name, cont_info)
def delete_container(self, cont_name, cont_info):
repo_type = cont_info['dep_target']
for name, ext in GCloudHandler.res_mgr.items():
if name == repo_type:
ext.obj.delete(cont_name, cont_info)
# App functions
def deploy_application(self, app_id, app_info):
app_type = common_functions.get_app_type(app_id)
for name, ext in GCloudHandler.app_mgr.items():
if name == app_type:
ext.obj.deploy_application(app_id, app_info)
def delete_application(self, app_id, app_info):
app_type = common_functions.get_app_type(app_id)
for name, ext in GCloudHandler.app_mgr.items():
if name == app_type:
ext.obj.delete_application(app_id, app_info)
def get_logs(self, app_id, app_info):
log_lines = ''
app_type = common_functions.get_app_type(app_id)
for name, ext in GCloudHandler.app_mgr.items():
if name == app_type:
log_lines = ext.obj.get_logs(app_id, app_info)
return log_lines |
fxdemolisher/frano | frano/templatetags/frano_filters.py | Python | mit | 2,315 | 0.015119 | from django import template
from django.template.defaultfilters import stringfilter
import locale
register = template.Library()
#-----------\
# FILTERS |
#-----------/
@register.filter
@stringfilter
def num_format(value, places = 2, min_places = 2):
return format(value, '.', int(places), 3, ',', int(min_places))
@register.filter
@stringfilter
def sign_choice(value, args = 'positive,negative,zero'):
positive, negative, zero = args.split(',')
value = float(value)
if value == 0:
return zero
elif value > 0:
return positive
else:
return negative
@register.filter
def sorted_set(value):
return sorted(value)
#-------------\
# UTILITIES |
#-------------/
def format(number, decimal_sep, decimal_pos, grouping=0, thousand_sep='', min_decimal_pos = None):
"""
NOTE: taken from django 1.2.3 source
Gets a number (as a number or string), and returns it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator
* thousand_sep: Thousand separator symbol (for example ",")
"""
# sign
if float(number) < 0:
sign = '-'
else:
sign = ''
# decimal part
str_number = unicode("%.8f" % float(number))
if str_number[0] == '-':
str_number = str_number[1:]
if '.' in str_number:
int_part, dec_part = str_num | ber.split('.')
if decimal_pos:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
# do not zero pad when its not needed
#if decimal_pos:
# dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
# zero pad to minimum decimal positions
if min_decimal_pos:
dec_part = dec_part + ('0' * (min_decimal_pos - len(dec_part)))
if dec_part: dec_part = decimal_sep + dec_part
# grouping
if thousand_sep != '' and grouping:
int_part_gd = ''
for cnt, digit in enumerate(int_part[::-1]):
if cnt and not cnt % grouping:
int_part_gd += thousand_sep
int_part_gd += digit
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
|
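# Illustrative calls (not part of the original module) showing how format()
# above composes its pieces; expected outputs are shown in the comments.
print format(1234567.891, '.', 2, 3, ',')   # 1,234,567.89
print format(-0.5, '.', 2, 3, ',', 2)       # -0.50 (padded to min_decimal_pos)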
stlpublicradio/ferguson-project | fabfile/flat.py | Python | mit | 3,519 | 0.002558 | #!/usr/bin/env python
import copy
from cStringIO import StringIO
from fnmatch import fnmatch
import gzip
import hashlib
import mimetypes
import os
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
import app_config
GZIP_FILE_TYPES = ['.html', '.js', '.json', '.css', '.xml']
class FakeTime:
def time(self):
return 1261130520.0
# Hack to override gzip's time implementation
# See: http://stackoverflow.com/questions/264224/setting-the-gzip-timestamp-from-python
gzip.time = FakeTime()
def deploy_file(connection, src, dst, headers={}):
"""
Deploy a single file to S3, if the local version is different.
"""
bucket = connection.get_bucket(app_config.S3_BUCKET['bucket_name'])
k = bucket.get_key(dst)
s3_md5 = None
if k:
s3_md5 = k.etag.strip('"')
else:
k = Key(bucket)
k.key = dst
file_headers = copy.copy(headers)
if 'Content-Type' not in headers:
file_headers['Content-Type'] = mimetypes.guess_type(src)[0]
# Gzip file
if os.path.splitext(src)[1].lower() in GZIP_FILE_TYPES:
file_headers['Content-Encoding'] = 'gzip'
with open(src, 'rb') as f_in:
contents = f_in.read()
output = StringIO()
f_out = gzip.GzipFile(filename=dst, mode='wb', fileobj=output)
f_out.write(contents)
f_out.close()
local_md5 = hashlib.md5()
local_md5.update(output.getvalue())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
print 'Skipping %s (has not changed)' % src
else:
print 'Uploading %s --> %s (gzipped)' % (src, dst)
k.set_contents_from_string(output.getvalue(), file_headers, policy='public-read')
# Non-gzip file
else:
with open(src, 'rb') as f:
local_md5 = hashlib.md5()
local_md5.update(f.read())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
print 'Skipping %s (has not changed)' % src
else:
print 'Uploading %s --> %s' % (src, dst)
k.set_contents_from_filename(src, file_headers, policy='public-read')
def deploy_folder(src, dst, headers={}, ignore=[]):
"""
Deploy a folder to S3, checking each file to see if it has changed.
"""
to_deploy = []
for local_path, subdirs, filenames in os.walk(src, topdown=True):
rel_path = os.path.relpath(local_path, src)
for name in filenames:
if name.startswith('.'):
continue
src_path = os.path.join(local_path, name)
skip = False
for pattern in ignore:
if fnmatch(src_path, pattern):
skip = True
break
if skip:
continue
if rel_path == '.':
dst_path = os.path.join(dst, name)
else:
dst_path = os.path.join(dst, rel_path, name)
to_deploy.append((src_path, dst_path))
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
for src, dst in to_deploy:
deploy_file(s3, src, dst, headers)
def delete_folder(dst):
"""
Delete a folder from S3.
"""
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
bucket = s3.get_bucket(app_config.S3_BUCKET['bucket_name'])
for key in bucket.list(prefix='%s/' % dst):
print 'Deleting %s' % (key.key)
key.delete()
|
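# A hedged usage sketch (not part of the original fabfile): pushing a built
# site directory to S3 under a key prefix. The paths, header value and ignore
# patterns here are illustrative assumptions, not project settings.
if __name__ == '__main__':
    deploy_folder(
        'www',                                    # local build output
        'my-project',                             # key prefix inside the bucket
        headers={'Cache-Control': 'max-age=300'},
        ignore=['*.psd', '*/tmp/*'],              # fnmatch-style, as in deploy_folder
    )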
Zulko/pompei | examples/typical_script.py | Python | mit | 4,324 | 0.006475 | """
This is a typical script to reconstruct one frame of a movie using a mosaic
of other frames with the Python package Pompei. It generates this picture of
general Maximus in Gladiator using 1100+ frames of the movie.
http://i.imgur.com/Eoglcof.jpg
This script goes in five steps:
1. Extract one frame every 5 seconds of the movie. Compute their 'signatures'
2. Extract one special frame (the one to be reconstructed) from the movie.
3. Split this frame into subregions and compute the signature of each region.
4. Run an algorithm to find (using the signatures) wich frames of the movie
match best with the different regions of the picture to reconstruct.
The algorithm also ensures that many different frames are used.
5. Assemble the selected best-matching frames into one big picture and save.
The code is well commented to paliate for the lack of documentation. For more,
see the functions doctrings.
"""
from pompei import (movie_to_folder,
get_image_signatures_from_folder,
compute_signatures_in_image,
find_best_matches,
best_matches_to_image)
# When comparing the frames of the movie to the regions of the picture to
# reconstruct, each frame and each region will be reduced to Nh x Nw
# zones from which the mean colors are computed. Here we choose 3 x 3.
# The resulting set of 9 colors is called the signature of the region/frame.
signatures_nh=3
signatures_nw=3
### STEP 1 - EXTRACTING THE FRAMES OF THE MOVIE
# For this example we treat gladiator. The result is this mosaic
# http://i.imgur.com/Eoglcof.jpg
foldername = "gladiator" # name of the folder for the frame pictures
filename = 'gladiator.flv' # the video file, from a legally-bought DVD
# The next call extracts the frames from the movie. At the same time it computes
# the signatures of the frames and store them in file gladiator/signatures.txt
# It's pretty long (5 minutes) and should only be done once, then you can
# comment it out if you want to fine-tune the parameters in the next lines.
image_folder_signatures = movie_to_folder(filename, foldername,
fps=1.0/5, # take one frame every 5 seconds
resize_factor=0.2, # downsize all frames of a factor 1/5
signatures_nh=signatures_nh,
signatures_nw=signatures_nw,
subclip=(5*60,-10*60)) # drop the first 5 and last 10 minutes to avoid credits.
# Get the signatures of each frame, already computed at the previous step.
image_folder_signatures = get_image_signatures_from_folder(foldername)
### STEP 2 - READING THE IMAGE TO BE RECONSTRUCTED
# Now we load the image to reconstruct. This could be any image but out of
# simplicity we choose one frame of the movie, so that it will have the
# same dimensions as the frames that will compose it.
# We take the scene just before "My name is Maximus...".
import moviepy.editor as mpy
image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array.
### STEP 3 - SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS
nh = nw = 60
image_signatures = compute_signatures_in_image(image, signatures_nh,
signatures_nw, nh, nw)
### STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE.
# This step is quite quick because we work with signatures (i.e. reduced
# versions of the images).
# The algorithm first attributes to each region of the final picture the movie
# frame that matches best. Some frames will be used more than once.
# Then, goal=5 means that the algorithm will iteratively diversify the frames
# used until the most used frames is used 5 times or less.
# npasses=3000 tells the algorithm to give up after 3000 iterations if it
# cannot reach its goal of 5. Choosing a lower npasses (like npasses=100) can be
# good sometimes to avoid over-diversification.
best_matches = find_best_matches(image_signatures, image_folder_signatures,
npasses=3000,goal=5)
### STEP 5 - ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE
# This produces the final picture: gladiator.png
# This will take long and produce a heavy PNG (50Mo) which can then be
# downsized by converting it to JPEG.
best_matches_to_image("%s.png"%foldername, best_matches, foldername) |
r39132/airflow | tests/test_utils.py | Python | apache-2.0 | 3,597 | 0.001112 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import unittest
import airflow.utils.logging
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.utils.operator_resources import Resources
class LogUtilsTest(unittest.TestCase):
def test_gcs_url_parse(self):
"""
Test GCS url parsing
"""
logging.info(
'About to create a GCSLog object without a connection. This will '
'log an error but testing will proceed.')
glog = airflow.utils.logging.GCSLog()
self.assertEqual(
glog.parse_gcs_url('gs://bucket/path/to/blob'),
('bucket', 'path/to/blob'))
# invalid URI
self.assertRaises(
AirflowException,
glog.parse_gcs_url,
'gs:/bucket/path/to/blob')
# trailing slash
self.assertEqual(
glog.parse_gcs_url('gs://bucket/path/to/blob/'),
('bucket', 'path/to/blob'))
# bucket only
self.assertEqual(
glog.parse_gcs_url('gs://bucket/'),
('bucket', ''))
class OperatorResourcesTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_all_resources_specified(self):
resources = Resources(cpus=1, ram=2, disk=3, gpus=4)
self.assertEqual(resources.cpus.qty, 1)
self.assertEqual(resources.ram.qty, 2)
self.assertEqual(resources.disk.qty, 3)
self.assertEqual(resources.gpus.qty, 4)
def test_some_resources_specified(self):
resources = Resources(cpus=0, disk=1)
self.assertEqual(resources.cpus.qty, 0)
self.assertEqual(resources.ram.qty,
configuration.conf.getint('operators', 'default_ram'))
self.assertEqual(resources.disk.qty, 1)
self.assertEqual(resources.gpus.qty,
configuration.conf.getint('operators', 'default_gpus'))
def test_no_resources_specified(self):
resources = Resources()
self.assertEqual(resources.cpus.qty,
configuration.conf.getint('operators', 'default_cpus'))
self.assertEqual(resources.ram.qty,
configuration.conf.getint('operators', 'default_ram'))
self.assertEqual(resources.disk.qty,
configuration.conf.getint('operators', 'default_disk'))
self.assertEqual(resources.gpus.qty,
configuration.conf.getint('operators', 'default_gpus'))
def test_negative_resource_qty(self):
with self.assertRaises(AirflowException):
Resources(cpus=-1)
|
maxamillion/ansible | lib/ansible/plugins/inventory/auto.py | Python | gpl-3.0 | 2,372 | 0.005481 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: auto
author:
- Matt Davis (@nitzmahone)
version_added: "2.5"
short_description: Loads and executes an inventory plugin specified in a YAML config
description:
- By whitelisting C(auto) inventory plugin, any YAML inventory config file with a
C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
config. This effectively provides automatic whitelisting of all installed/accessible inventory plugins.
- To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
'''
EXAMPLES = '''
# This plugin is not intended for direct use; it is a fallback mechanism for automatic whitelisting of
# all installed inventory plugins.
'''
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.plugins.loader import inventory_loader
class InventoryModule(BaseInventoryPlugin):
NAME = 'auto'
def verify_file(self, path):
if not path.endswith('.yml') and not path.endswith('.yaml'):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=True):
config_data = loader.load_from_file(path, cache=False)
try:
plugin_name = config_data.get('plugin', None)
except AttributeError:
plugin_name = None
if not plugin_name:
raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))
plugin = inventory_loader.get(plugin_name)
if not plugin:
raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))
if not plugin.verify_file(path):
raise AnsibleParserError("inventory config '{0}' could not be verified by plugin '{1}'".format(path, plugin_name))
plugin.parse(inventory, loader, path, cache=cache)
try:
plugin.update_cache_if_changed()
except AttributeError:
pass
|
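# A hedged illustration (beyond the plugin's own EXAMPLES string): with 'auto'
# enabled, an inventory file whose root carries a 'plugin' key is dispatched to
# that plugin. The plugin name and options below are assumed examples only.
#
#   # inventory.yml
#   plugin: aws_ec2
#   regions:
#     - us-east-1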
celery/cyme | cyme/settings.py | Python | bsd-3-clause | 1,738 | 0 | """Since cyme works as a contained Django APP, this is the default settings
file used when cyme is used outside of a Django project context."""
from __future__ import absolute_import
import os
import djcelery
djcelery.setup_loader()
DEBUG = True
# Broker settings.
BROKER_HOST = 'amqp://127.0.0.1:5672//'
BROKER_POOL_LIMIT = 100
CELERYD_LOG_FORMAT = """\
[%(asctime)s: %(levelname)s] %(message)s\
""".strip()
DB_NAME = os.environ.get('CYME_DB_NAME') or 'branch.db'
# Databases
DATABASES = {'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DB_NAME,
}}
# URL and file paths.
SITE_ID = 1
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/adminstatic/'
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
ROOT_URLCONF = 'cyme.api.urls'
# Time and localization.
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = USE_L10N = True
# Apps and middleware.
INSTALLED_APPS = ('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'cyme', # cyme must be before admin.
'cyme.api',
'django.contrib.admin',
'django.contrib.admindocs')
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '9a3!m32h23psjjkkjl#()hs+-sv@$3*mgq!m3s!encow2&*738'
|
jnewland/home-assistant | homeassistant/components/mysensors/binary_sensor.py | Python | apache-2.0 | 1,398 | 0 | """Support for MySensors binary sensors."""
from homeassistant.components import mysensors
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES, DOMAIN, BinarySensorDevice)
from homeassistant.const import STATE_ON
SENSORS = {
'S_DOOR': 'door',
'S_MOTION': 'motion',
'S_SMOKE': 'smoke',
'S_SPRINKLER': 'safety',
'S_WATER_LEAK': 'safety',
'S_SOUND': 'sound',
'S_VIBRATION': 'vibration',
'S_MOISTURE': 'moisture',
}
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors platform for binary sensors."""
mysensors.setup_mysensors_platform(
hass, DOMAIN, discovery_info, MySensorsBinarySensor,
async_add_entities=async_add_entities)
class MySensorsBinarySensor(
mysensors.device.MySensorsEntity, BinarySensorDevice):
"""Representation of a MySensors Binary Sensor child node."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._values.get(self.value_type) == STATE_ON
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
pres = self.gateway.const.Presentation
device_class = SENSORS.get(pres(self.child_type).name)
if device_class in DEVICE_CLASSES:
return device_class
return None
|
lbouma/Cyclopath | pyserver/item/item_user_watching.py | Python | apache-2.0 | 18,252 | 0.014464 | # Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import traceback
import conf
import g
from grax.access_level import Access_Level
from gwis.exception.gwis_nothing_found import GWIS_Nothing_Found
from item import item_base
from item import item_user_access
from item.util import revision
from item.util.item_type import Item_Type
from item.util.watcher_frequency import Watcher_Frequency
log = g.log.getLogger('item_user_watchn')
class One(item_user_access.One):
item_type_id = None
#item_type_id = Item_Type.ITEM_USER_WATCHING
item_type_table = None
item_gwis_abbrev = None # A derived class will override this.
#item_gwis_abbrev = 'iuw'
# Set child_item_types to None since our parent class set it to an
# empty collection, but we don't want to allow callers to specify
# this class's item type to get items.
child_item_types = None
local_defns = [
]
attr_defns = item_user_access.One.attr_defns + local_defns
psql_defns = item_user_access.One.psql_defns + local_defns
gwis_defns = item_base.One.attr_defns_reduce_for_gwis(attr_defns)
#
cols_copy_nok = item_user_access.One.cols_copy_nok + []
__slots__ = [] + [attr_defn[0] for attr_defn in local_defns]
# *** Constructor
def __init__(self, qb=None, row=None, req=None, copy_from=None):
item_user_access.One.__init__(self, qb, row, req, copy_from)
# ***
# ***
class Many(item_user_access.Many):
one_class = One
__slots__ = ()
def __init__(self):
item_user_access.Many.__init__(self)
#
def qb_join_item_event_alert(self, qb):
log.error(
'FIXME: BUG nnnn: qb_join_item_event_alert: not implemented')
# and the table is empty, too.
if qb.sql_clauses.inner.join.find('item_event_read') == -1:
if qb.username != conf.anonymous_username:
qb.sql_clauses.inner.select += (
"""
, ievt.messaging_id AS item_read_id
"""
)
qb.sql_clauses.inner.join += (
"""
LEFT OUTER JOIN item_event_alert AS ievt
ON (gia.item_id = ievt.item_id)
| """)
qb.sql_clauses.inner.group_by += (
"""
, ievt.messaging_id
"""
)
else:
qb.sql_clauses.inner.select += (
| """
, NULL AS item_read_id
"""
)
qb.sql_clauses.outer.shared += (
"""
, group_item.item_read_id
"""
)
#
def qb_join_item_event_read(self, qb):
g.assurt(False) # Deprecated.
# See: qb_add_item_event_read. We should just select in the outer.
log.error('qb_join_item_event_read: Deprecated')
if qb.sql_clauses.inner.join.find('item_event_read') == -1:
if qb.username != conf.anonymous_username:
qb.sql_clauses.inner.select += (
"""
, itrd.id AS itrd_event_id
"""
)
qb.sql_clauses.inner.join += (
"""
LEFT OUTER JOIN item_event_read AS itrd
ON ((gia.item_id = itrd.item_id)
AND (itrd.username = %s))
""" % (qb.db.quoted(qb.username),))
qb.sql_clauses.inner.group_by += (
"""
, itrd.id
"""
)
# If we joined using stack_id and not also version and branch_id,
# we'd want to order by revision ID:
# qb.sql_clauses.inner.order_by += (
# # Order by latest read event: we can use event id or rev id.
# # I.e., the following is effectively same as: itrd.id DESC
# """
# , itrd.revision_id DESC
# """
# )
#
# Argh. [lb] wants to deprecate this fcn: it justs add to an
# alreayd long join chain, and by joining, and since we add
# multiple rows for the same system ID, it maybe makes more
# sense to do an outer select fcn...
else:
qb.sql_clauses.inner.select += (
"""
, NULL AS itrd_event_id
"""
)
qb.sql_clauses.outer.shared += (
"""
, group_item.itrd_event_id
"""
)
#
def qb_add_item_event_read(self, qb):
qb.sql_clauses.outer.enabled = True
# MAYBE: This seems inefficient. Maybe qb can track what's been added,
# instead of searching strings all the time.
if qb.sql_clauses.outer.group_by.find('user_has_read_item') == -1:
qb.sql_clauses.outer.select += (
"""
, CASE
WHEN EXISTS(SELECT id FROM item_event_read
WHERE item_id = group_item.system_id
AND username = %s
LIMIT 1) THEN TRUE
ELSE NULL END AS user_has_read_item
""" % (qb.db.quoted(qb.username),))
qb.sql_clauses.outer.group_by += (
"""
, user_has_read_item
""")
#
def sql_apply_query_filters(self, qb, where_clause="", conjunction=""):
g.assurt((not conjunction) or (conjunction == "AND"))
if qb.filters.filter_by_unread:
# User must be logged in. Client should prevent this.
g.assurt(qb.username and (qb.username != conf.anonymous_username))
# BUG nnnn: Display alerts in the client.
# Questions: Would we still send digest item watcher emails?
# How would you finish designing item_event_alert table?
# - Define the different msg_type_id types.
#
# For now, we use the item_event_read table, which is basically
# the thread_read_event table from CcpV1, but now it works on
# any item type. The client can ask that we return only items
# that a user has not read, or it can ask us to mark what's
# been read and what's not been read.
#
# The first implementation was a join:
#
# self.qb_join_item_event_read(qb)
# # Or, using the new, unimplemented item_event_alert table:
# # self.qb_join_item_event_alert(qb)
#
# But that creates two problems: 1., we already join a ton of tables,
# which ends up impacting SQL performance, and 2., the server saves
# multiple read events for the same item (same system ID), so the join
# could cause a magnifying effect on the number of rows fetched in the
# inner query. It seems to make more sense to run an EXISTS in the
# outer SELECT. This causes one more SQL statement for every row
# fetched... but how bad can it be?
#
# This is the code used when joing item_event_read:
#
# # Look for items that have no read record, or whose record is old.
# # We checked that the record belongs to the user in the join, so
# # we just check that a record doesn't exist or that it's rev_id is
# # dated.
# #
# # NOTE: Since we're using system IDs, we shouldn't need to look at
# # revision IDs (or versions). So, this is not necessary:
# # overkill: ((itrd.id IS NULL)
# # OR (itrd.revision_id < gia.valid_start_rid))
# where_clause += (
# """
# %s (itrd.id IS NULL)
# """ % (conjunction,))
# conjunction = "AND"
#
# And this is untested code for use when joining item_event_alert:
#
# where_clause += (
# """
# %s
# (ievt.messaging_id IS NOT NULL)
# AND (ievt.username = %s)
# AND (ievt.date_alerted IS NOT NULL)
# AND (ievt.msg_type_id = ??? /* none defined ye |
Rogentos/legacy-anaconda | iw/bootloader_main_gui.py | Python | gpl-2.0 | 8,647 | 0.004048 | #
# bootloader_main_gui.py: gui bootloader configuration dialog
#
# Copyright (C) 2001, 2002 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Jeremy Katz <katzj@redhat.com>
#
import gtk
import gobject
import gui
from iw_gui import *
from constants import *
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
from osbootwidget import OSBootWidget
from blpasswidget import BootloaderPasswordWidget
class MainBootloaderWindow(InstallWindow):
windowTitle = N_("Boot Loader Configuration")
def __init__(self, ics):
InstallWindow.__init__(self, ics)
self.parent = ics.getICW().window
def getPrev(self):
pass
def getNext(self):
# go ahead and set the device even if we already knew it
# since that won't change anything
self.bl.setDevice(self.bldev)
self.bl.drivelist = self.driveorder
if not self.grubCB.get_active():
# if we're not installing a boot loader, don't show the second
# screen and don't worry about other options
self.dispatch.skipStep("instbootloader", skip = 1)
# kind of a hack...
self.bl.defaultDevice = None
return
else:
self.dispatch.skipStep("instbootloader", skip = 0)
self.bl.setUseGrub(1)
# set the password
self.bl.setPassword(self.blpass.getPassword(), isCrypted = 0)
# set the bootloader images based on what's in our list
self.oslist.setBootloaderImages()
def bootloaderChanged(self, *args):
active = self.grubCB.get_active()
for widget in [ self.oslist.getWidget(), self.blpass.getWidget(), self.deviceButton ]:
widget.set_sensitive(active)
def _deviceChange(self, b, anaconda, *args):
def __driveChange(combo, dxml, choices):
if not choices.has_key("mbr"):
return
iter = combo.get_active_iter()
if not iter:
return
first = combo.get_model()[iter][1]
desc = choices["mbr"][1]
dxml.get_widget("mbrRadio").set_label("%s - /dev/%s" %(_(desc), first))
dxml.get_widget("mbrRadio").set_data("bootDevice", first)
def __genStore(combo, disks, active):
model = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_STRING)
combo.set_model(model)
cell = gtk.CellRendererText()
combo.pack_start(cell, True)
combo.set_attributes(cell, text = 0)
for disk in disks:
i = model.append(None)
model[i] = ("%s %8.0f MB %s" %(disk.name, disk.size,
disk.description),
"%s" %(disk.name,))
if disk.name == active:
combo.set_active_iter(i)
return model
(dxml, dialog) = gui.getGladeWidget("blwhere.glade",
"blwhereDialog")
gui.addFrame(dialog)
dialog.set_transient_for(self.parent)
dialog.show()
choices = anaconda.platform.bootloaderChoices(self.bl)
for t in ("mbr", "boot"):
if not choices.has_key(t):
continue
(device, desc) = choices[t]
w = dxml.get_widget("%sRadio" %(t,))
w.set_label("%s - /dev/%s" %(_(desc), device))
w.show()
if self.bldev == device:
w.set_active(True)
else:
w.set_active(False)
w.set_data("bootDevice", device)
for i in range(1, 5):
if len(self.driveorder) < i:
break
combo = dxml.get_widget("bd%dCombo" %(i,))
lbl = dxml.get_widget("bd%dLabel" %(i,))
combo.show()
lbl.show()
partitioned = anaconda.storage.partitioned
disks = anaconda.storage.disks
bl_disks = [d for d in disks if d in partitioned]
m = __genStore(combo, bl_disks, self.driveorder[i - 1])
dxml.get_widget("bd1Combo").connect("changed", __driveChange, dxml, choices)
__driveChange(dxml.get_widget("bd1Combo"), dxml, choices)
while 1:
rc = dialog.run()
if rc in [gtk.RESPONSE_CANCEL, gtk.RESPONSE_DELETE_EVENT]:
break
# set the boot device based on what they chose
if dxml.get_widget("bootRadio").get_active():
self.bldev = dxml.get_widget("bootRadio").get_data("bootDevice")
elif dxml.get_widget("mbrRadio").get_active():
self.bldev = dxml.get_widget("mbrRadio").get_data("bootDevice")
else:
raise RuntimeError, "No radio button selected!"
# and adjust the boot order
neworder = []
for i in range(1, 5):
if len(self.driveorder) < i:
break
combo = dxml.get_widget("bd%dCombo" %(i,))
iter = combo.get_active_iter()
if not iter:
continue
act = combo.get_model()[iter][1]
if act not in neworder:
neworder.append(act)
for d in self.driveorder:
if d not in neworder:
neworder.append(d)
self.driveorder = neworder
break
dialog.destroy()
self.grubCB.set_label(_("_Install boot loader on /dev/%s.") %
(self.bldev,))
return rc
def _setBLCBText(self):
self.grubCB.set_label(_("_Ins | tall boot loader on /dev/%s.") %
(self.bldev,))
def getScreen(self, anaconda):
self.dispatch = anaconda.dispatch
self.bl = anaconda.bootloader
self.intf = anaconda.intf
self.driveorder = self.bl.drivelist
if len(self.driveorder) == 0:
partitioned = anaconda.storage.partitioned
disks = anaconda.storage.disks
self.driveorder = [d.name for d in disks if d in partitioned]
if self.bl.getPassword():
self.usePass = 1
self.password = self.bl.getPassword()
else:
self.usePass = 0
self.password = None
thebox = gtk.VBox (False, 12)
thebox.set_border_width(18)
# make sure we get a valid device to say we're installing to
if self.bl.getDevice() is not None:
self.bldev = self.bl.getDevice()
else:
# we don't know what it is yet... if mbr is possible, we want
# it, else we want the boot dev
choices = anaconda.platform.bootloaderChoices(self.bl)
if choices.has_key('mbr'):
self.bldev = choices['mbr'][0]
else:
self.bldev = choices['boot'][0]
hb = gtk.HBox(False, 12)
self.grubCB = gtk.CheckButton(_("_Install boot loader on /dev/%s.") %
(self.bldev,))
self.grubCB.set_active(not self.dispatch.stepInSkipList("instbootloader"))
self.grubCB.connect("toggled", self.bootloaderChanged)
hb.pack_start(self.grubCB, False)
self.deviceButton = gtk.Button(_("_Change device"))
self.deviceButton.connect("clicked", self._deviceChange, anaconda)
hb.pack_start(self.deviceButton, False)
thebox.pack_start(hb, False)
# control whether or not the |
pahaz/prospector | prospector/run.py | Python | gpl-2.0 | 4,952 | 0.000808 | from __future__ import absolute_import
import os.path
import sys
from datetime import datetime
from prospector import tools, blender, postfilter
from prospector.config import ProspectorConfig, configuration as cfg
from prospector.finder import find_python
from prospector.formatters import FORMATTERS
from prospector.message import Location, Message
__all__ = (
'Prospector',
'main',
)
class Prospector(object):
def __init__(self, config):
self.config = config
self.summary = None
self.messages = None
def process_messages(self, found_files, messages):
for message in messages:
if self.config.absolute_paths:
message.to_absolute_path(self.config.workdir)
else:
message.to_relative_path(self.config.workdir)
if self.config.blending:
messages = blender.blend(messages)
filepaths = found_files.iter_module_paths(abspath=False)
return postfilter.filter_messages(filepaths, self.config.workdir, messages)
def execute(self):
summary = {
'started': datetime.now(),
}
summary.update(self.config.get_summary_information())
found_files = find_python(self.config.ignores, self.config.paths,
self.config.explicit_file_mode, self.config.workdir)
# Run the tools
messages = []
for tool in self.config.get_tools(found_files):
try:
messages += tool.run(found_files)
except Exception: # pylint: disable=broad-except
if self.config.die_on_tool_error:
raise
else:
for name, cls in tools.TOOLS.items():
if cls == tool.__class__:
toolname = name
break
else:
toolname = 'Unknown'
loc = Location(self.config.workdir, None, None, None, None)
msg = 'Tool %s failed to run (exception was raised)' % (
toolname,
)
message = Message(
toolname,
'failure',
loc,
message=msg,
)
messages.append(message)
messages = self.process_messages(found_files, messages)
summary['message_count'] = len(messages)
summary['completed'] = datetime.now()
# Timedelta.total_seconds() is not available
# on Python<=2.6 so we calculate it ourselves
# See issue #60 and http://stackoverflow.com/a/3694895
delta = (summary['completed'] - summary['started'])
total_seconds = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 1e6) / 1e6
summary['time_taken'] = '%0.2f' % total_seconds
self.summary = summary
self.messages = messages
def get_summary(self):
return self.summary
def get_messages(self):
return self.messages
def print_messages(self, write_to=None):
write_to = write_to or sys.stdout
output_format = self.config.get_output_format()
self.summary['formatter'] = output_format
formatter = FORMATTERS[output_format](self.summary, self.messages, self.config.profile)
# Produce the output
write_to.write(formatter.render(
summary=not self.config.messages_only,
messages=not self.config.summary_only,
profile=self.config.show_profile
))
write_to.write('\n')
def get_parser():
"""
This is a helper method to return an argparse parser, to
be used with the Sphinx argparse plugin for documentation.
"""
manager = cfg.build_manager()
source = cfg.build_command_line_source(prog='prospector', description=None)
return source.build_parser(manager.settings, None)
def main():
# Get our configuration
config = ProspectorConfig()
paths = config.paths
if len(paths) > 1 and not all([os.path.isfile(path) for path in paths]):
sys.stderr.write('\nIn multi-path mode, all inputs must be files, '
'not directories.\n\n')
get_parser().print_usage()
sys.exit(2)
# Make it so
prospector = Prospector(config)
prospector.execute()
prospector.print_messages()
if config.exit_with_zero_on_success():
# if we ran successfully, and the user wants us to, then we'll
# exit cleanly
return 0
# otherwise, finding messages is grounds for exiting with an error
# code, to make it easier for bash scripts and similar situations
# to know if there any errors have been found.
if len(prospector.get_messages()) > 0:
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
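# A minimal programmatic-usage sketch, inferred from the classes above rather
# than from documented API guarantees; the Message attribute names used in the
# loop are assumptions based on how messages are constructed in execute().
from prospector.config import ProspectorConfig
from prospector.run import Prospector

config = ProspectorConfig()   # parses profiles/CLI configuration
prospector = Prospector(config)
prospector.execute()          # runs the configured tools
for message in prospector.get_messages():
    print('%s %s: %s' % (message.source, message.code, message.message))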
samrussell/sippy | sippy/ESipHeaderIgnore.py | Python | gpl-2.0 | 1,217 | 0 | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2007 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to | use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Softw | are, Inc. by e-mail at the
# following addresses: sales@sippysoft.com.
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
class ESipHeaderIgnore(Exception):
def __init__(self):
Exception.__init__(self)
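# Hedged usage sketch (parse_header is hypothetical, not part of this file):
# a header parser raises ESipHeaderIgnore to signal that a header should be
# silently dropped rather than treated as a parse error:
#   try:
#       headers.append(parse_header(raw_value))
#   except ESipHeaderIgnore:
#       pass  # skip this header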
|
b0ttl3z/SickRage | lib/github/Organization.py | Python | gpl-3.0 | 29,379 | 0.00337 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.Plan
import github.Team
import github.Event
import github.Repository
import github.NamedUser
class Organization(github.GithubObject.CompletableGithubObject):
"""
This class represents Organizations. The refe | rence can be found here http://developer.github.com/v3/orgs/
"""
def __repr__(self):
return self.get__repr__({"id": self._id.value, "name": self._name.value})
@property
def | avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def billing_email(self):
"""
:type: string
"""
self._completeIfNotSet(self._billing_email)
return self._billing_email.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._public_members_url)
return self._public_members_url.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_public_members(self, public_member):
"""
:calls: `PUT /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/public_members/" + public_member._identity
)
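    # Hedged usage sketch (token, organization and user names assumed, not
    # taken from this file):
    #   g = github.Github("access-token")
    #   org = g.get_organization("my-org")
    #   org.add_to_public_members(g.get_user("some-user"))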
def create_fo |
diorcety/translate | translate/storage/test_mo.py | Python | gpl-2.0 | 4,281 | 0.003504 | import os
import subprocess
import sys
from io import BytesIO
from translate.storage import factory, mo, test_base
class TestMOUnit(test_base.TestTranslationUnit):
UnitClass = mo.mounit
def test_context(self):
unit = self.UnitClass("Message")
unit.setcontext('context')
assert unit.getcontext() == 'context'
posources = [
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-09 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8-bit\n"
''',
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-09 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8-bit\n"
msgid "plant"
msgstr ""
''',
# The following test is commented out because the hash size differs from
# gettext's, since we do not count untranslated units.
#r'''
#msgid ""
#msgstr ""
#"PO-Revision-Date: 2006-02-09 23:33+0200\n"
#"MIME-Version: 1.0\n"
#"Content-Type: text/plain; charset=UTF-8\n"
#"Content-Transfer-Encoding: 8-bit\n"
#
#msgid "plant"
#msgstr ""
#
#msgid ""
#"_: Noun\n"
#"convert"
#msgstr "bekeerling"
#''',
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-09 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8-bit\n"
msgid "plant"
msgstr ""
msgid ""
"_: Noun\n"
"convert"
msgstr "bekeerling"
msgctxt "verb"
msgid ""
"convert"
msgstr "omskakel"
''',
r'''
msgid ""
msgstr ""
"PO-Revision-Date: 2006-02-09 23:33+0200\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8-bit\n"
msgid "plant"
msgstr ""
msgid ""
"_: Nou | n\n"
"convert"
msgstr "bekeerling"
msgctxt "verb"
msgid ""
"convert"
msgstr "omskakel"
msgid "tree"
msgid_plural "trees"
msgstr[0] ""
''',
] |
class TestMOFile(test_base.TestTranslationStore):
StoreClass = mo.mofile
def get_mo_and_po(self):
return (os.path.abspath(self.filename + '.po'),
os.path.abspath(self.filename + '.msgfmt.mo'),
os.path.abspath(self.filename + '.pocompile.mo'))
def remove_po_and_mo(self):
for file in self.get_mo_and_po():
if os.path.exists(file):
os.remove(file)
def setup_method(self, method):
test_base.TestTranslationStore.setup_method(self, method)
self.remove_po_and_mo()
def teardown_method(self, method):
test_base.TestTranslationStore.teardown_method(self, method)
self.remove_po_and_mo()
def test_language(self):
"""Test that we can return the target language correctly."""
store = self.StoreClass()
store.updateheader(add=True, Language="zu")
assert store.gettargetlanguage() == "zu"
def test_context(self):
store = self.StoreClass()
unit = self.StoreClass.UnitClass('source')
unit.target = 'target'
unit.setcontext('context')
store.addunit(unit)
assert b'context' in store.__bytes__()
def test_output(self):
for posource in posources:
print("PO source file")
print(posource)
PO_FILE, MO_MSGFMT, MO_POCOMPILE = self.get_mo_and_po()
posource = posource.encode('utf-8')
with open(PO_FILE, 'wb') as out_file:
out_file.write(posource)
subprocess.call(['msgfmt', PO_FILE, '-o', MO_MSGFMT])
subprocess.call(['pocompile', '--errorlevel=traceback', PO_FILE, MO_POCOMPILE])
store = factory.getobject(BytesIO(posource))
if store.isempty() and not os.path.exists(MO_POCOMPILE):
# pocompile doesn't create MO files for empty PO files, so we
# can skip the checks here.
continue
with open(MO_MSGFMT, 'rb') as mo_msgfmt_f:
mo_msgfmt = mo_msgfmt_f.read()
print("msgfmt output:")
print(repr(mo_msgfmt))
with open(MO_POCOMPILE, 'rb') as mo_pocompile_f:
mo_pocompile = mo_pocompile_f.read()
print("pocompile output:")
print(repr(mo_pocompile))
assert mo_msgfmt == mo_pocompile
|
titipata/pubmed_parser | pubmed_parser/utils.py | Python | mit | 4,118 | 0.001943 | import calendar
import collections
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from time import strptime
from six import string_types
from lxml import etree
from itertools import chain
def remove_namespace(tree):
"""
Strip namespace from parsed XML
"""
for node in tree.iter():
try:
has_namespace = node.tag.startswith("{")
except AttributeError:
continue # node.tag is not a string (node is a comment or similar)
if has_namespace:
node.tag = node.tag.split("}", 1)[1]
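# Illustrative sketch (namespace URI assumed, not from this module): a node
# tagged "{http://jats.nlm.nih.gov}article" is rewritten in place to plain
# "article"; comment nodes, whose .tag is not a string, are skipped by the
# AttributeError guard above.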
def read_xml(path, nxml=False):
"""
Parse tree from given XML path
"""
try:
tree = etree.parse(path)
if ".nxml" in path or nxml:
remove_namespace(tree) # strip namespace when reading an XML file
    except Exception:
try:
tree = etree.fromstring(path)
except Exception:
print(
"Error: it was not able to read a path, a file-like object, or a string as an XML"
)
raise
return tree
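# Hedged usage sketch (input assumed): because read_xml falls back to
# etree.fromstring, a raw XML string parses as well as a file path:
#   root = read_xml("<article><title>t</title></article>")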
def stringify_children(node):
"""
Filters and removes possible Nones in texts and tails
ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
"""
parts = (
[node.text]
+ list(chain(*([c.text, c.tail] for c in node.getchildren())))
+ [node.tail]
)
return "".join(filter(None, parts))
def stringify_affiliation(node):
"""
Filters and removes possible Nones in texts and tails
ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
"""
parts = (
[node.text]
+ list(
chain(
*(
[c.text if (c.tag != "label" and c.tag != "sup") else "", c.tail]
for c in node.getchildren()
)
)
)
+ [node.tail]
)
return " ".join(filter(None, parts))
def stringify_affiliation_rec(node):
"""
Flatten and join list to string
ref: http://stackoverflow.com/questions/2158395/flat | ten-an-irregular-list-of-lists-in-python
"""
parts = | _recur_children(node)
parts_flatten = list(_flatten(parts))
return " ".join(parts_flatten).strip()
def _flatten(l):
"""
    Flatten a nested list into one dimension
"""
for el in l:
if isinstance(el, Iterable) and not isinstance(el, string_types):
for sub in _flatten(el):
yield sub
else:
yield el
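# Hedged example (values assumed): strings are Iterable but are yielded whole
# because of the string_types check, so
#   list(_flatten([1, [2, [3, "ab"]], "cd"])) == [1, 2, 3, "ab", "cd"]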
def _recur_children(node):
"""
    Recurse through a node and its children, collecting text and tail parts
"""
if len(node.getchildren()) == 0:
parts = (
([node.text or ""] + [node.tail or ""])
if (node.tag != "label" and node.tag != "sup")
else ([node.tail or ""])
)
return parts
else:
parts = (
[node.text or ""]
+ [_recur_children(c) for c in node.getchildren()]
+ [node.tail or ""]
)
return parts
def month_or_day_formater(month_or_day):
"""
Parameters
----------
month_or_day: str or int
must be one of the following:
(i) month: a three letter month abbreviation, e.g., 'Jan'.
(ii) day: an integer.
Returns
-------
numeric: str
a month of the form 'MM' or a day of the form 'DD'.
Note: returns None if:
(a) the input could not be mapped to a known month abbreviation OR
(b) the input was not an integer (i.e., a day).
"""
if month_or_day.replace(".", "") in filter(None, calendar.month_abbr):
to_format = strptime(month_or_day.replace(".", ""), "%b").tm_mon
elif month_or_day.strip().isdigit() and "." not in str(month_or_day):
to_format = int(month_or_day.strip())
else:
return None
return ("0" if to_format < 10 else "") + str(to_format)
def pretty_print(node):
"""
Pretty print a given lxml node
"""
print(etree.tostring(node, pretty_print=True).decode("utf-8"))
|
yupbank/onekeyvpn | ensure.py | Python | bsd-3-clause | 690 | 0.015942 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
ensure.py
Author: yupbank
Email: yupbank@gmail.com
Created on
2013 | -09-30
'''
import sh
import os
import sys
def ubuntu_install(package):
return os.popen( | 'sudo apt-get install %s -y'%package)
OP_F = {
"Linux": ubuntu_install
}
OP_PACKAGE = {
"Linux": ['openswan', 'xl2tpd', 'ppp'][::-1]
}
def install_package(package, sys_type):
return OP_F[sys_type](package)
def main():
os_type = sh.uname().strip()
for p in OP_PACKAGE[os_type]:
for info in install_package(p, os_type):
sys.stdout.flush()
print info
#print sh.ifconfig()
if __name__ == '__main__':
main()
|
vecnet/vnetsource | datawarehouse/mixins.py | Python | mpl-2.0 | 1,115 | 0.007175 | from collections import OrderedDict
from django.http import HttpResponse
import simplejson
class JSONMixin(object):
"""This class was designed to be inherited and used to return JSON objects from an Ordered Dictionary
"""
## Ordered Dictionary used to create serialized JSON object
    # return_list = OrderedDic | t()  # This will enforce the ordering that we receive from the database
def __init__(self):
"""
Init function for the JSON Mixin class
"""
        self.return_list = OrderedDict()
return
def render_to_response(self, context):
"""Extends default render to response to return serialized JSON.
"""
return self.get_json_response(self.convert_to_json())
def get_json_response(self, content, **httpresponse_kw | args):
"""Returns JSON to calling object in the form of an http response.
"""
return HttpResponse(content,content_type='application/json',**httpresponse_kwargs)
def convert_to_json(self):
"""Serialized the return_list into JSON
"""
return simplejson.dumps(self.return_list) |
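# Hedged usage sketch (hypothetical caller; note that __init__ does not call
# super().__init__, so cooperative multiple inheritance needs care):
#   mixin = JSONMixin()
#   mixin.return_list['status'] = 'ok'
#   response = mixin.render_to_response(context=None)
#   # -> HttpResponse with body '{"status": "ok"}' and a JSON content type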