| code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from config import *
if config.config_usr_info_storage == "sqlite":
    import sqlite_store as key_storage
elif config.config_usr_info_storage == "mysql":
    import mysql_store as key_storage
elif config.config_usr_info_storage == "file":
    import file_store as key_storage
else:
    import file_store as key_storage
class storage(key_storage.base_storage):
def __init__(self, usr=None, usr_key=None):
key_storage.base_storage.__init__(self, usr, usr_key)
if __name__ == '__main__':
pass
| lifulong/account-manager | src/core/store.py | Python | gpl-2.0 | 538 |
#!/usr/bin/python
### github.com/bl4de | hackerone.com/bl4de ###
import sys
import hashlib
import urllib
import base64
description = """
hasher.py - hash string using SHA1, MD5, Base64, Hex, Encode URL etc.
usage: ./hasher.py [string_to_hash]
"""
def usage():
print description
exit(0)
def hex_encode(s):
enc = ''
for c in s:
        enc = enc + '{:02x}'.format(ord(c))  # zero-pad so bytes below 0x10 keep two hex digits
return enc
def main(s):
print "SHA1\t\t{}".format(hashlib.sha1(s).hexdigest())
print "MD5 \t\t{}".format(hashlib.md5(s).hexdigest())
print "Base64 \t\t{}".format(base64.b64encode(s))
print "URL-encoded \t{}".format(urllib.pathname2url(s))
print "HEX encoded \t{}".format(hex_encode(s))
if __name__ == "__main__":
if (len(sys.argv) == 2):
arguments = sys.argv[1:]
main(arguments[0])
else:
usage()
| bl4de/security-tools | hasher.py | Python | mit | 880 |
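The script above is Python 2 (print statements, hashing str objects directly). Below is a minimal sketch of how the same main() could look on Python 3, assuming UTF-8 input; this adaptation is not part of the repository.

import base64
import hashlib
import urllib.request

def main(s):
    # Python 3 hashes bytes, so encode the input string first.
    data = s.encode('utf-8')
    print("SHA1\t\t{}".format(hashlib.sha1(data).hexdigest()))
    print("MD5 \t\t{}".format(hashlib.md5(data).hexdigest()))
    print("Base64 \t\t{}".format(base64.b64encode(data).decode('ascii')))
    # pathname2url moved to urllib.request in Python 3.
    print("URL-encoded \t{}".format(urllib.request.pathname2url(s)))
    # bytes.hex() replaces the manual hex_encode() loop above.
    print("HEX encoded \t{}".format(data.hex()))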
from django.db import models
from django.conf import settings
def get_path(instance, second):
print(second)
return instance.path
# Create your models here.
class Document(models.Model):
CAN_VIEW = 'can_view_document'
CAN_DELETE = 'can_delete_document'
CAN_UPDATE = 'can_change_document'
filename = models.CharField(max_length=100, default='')
path = models.CharField(max_length=100, default='')
description = models.TextField(default='')
processed_text = models.TextField(default='')
file = models.FileField(upload_to=get_path, default=None)
author = models.ForeignKey(settings.AUTH_USER_MODEL, default=-1)
class Meta:
permissions = (
('can_view_document', 'Can view document'),
('can_delete_document', 'Can delete document'),
('can_change_document', 'Can change document'),
)
| apiaas/drawer-api | document/models.py | Python | apache-2.0 | 881 |
try:
unicode
except NameError:
raise ImportError
from pybench import Test
from string import join
class ConcatUnicode(Test):
version = 2.0
operations = 10 * 5
rounds = 60000
def test(self):
# Make sure the strings are *not* interned
s = unicode(join(map(str,range(100))))
t = unicode(join(map(str,range(1,101))))
for i in xrange(self.rounds):
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
t + s
def calibrate(self):
s = unicode(join(map(str,range(100))))
t = unicode(join(map(str,range(1,101))))
for i in xrange(self.rounds):
pass
class CompareUnicode(Test):
version = 2.0
operations = 10 * 5
rounds = 150000
def test(self):
# Make sure the strings are *not* interned
s = unicode(join(map(str,range(10))))
t = unicode(join(map(str,range(10))) + "abc")
for i in xrange(self.rounds):
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
t < s
t > s
t == s
t > s
t < s
def calibrate(self):
s = unicode(join(map(str,range(10))))
t = unicode(join(map(str,range(10))) + "abc")
for i in xrange(self.rounds):
pass
class CreateUnicodeWithConcat(Test):
version = 2.0
operations = 10 * 5
rounds = 80000
def test(self):
for i in xrange(self.rounds):
s = u'om'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
s = s + u'xax'
s = s + u'xbx'
s = s + u'xcx'
s = s + u'xdx'
s = s + u'xex'
def calibrate(self):
for i in xrange(self.rounds):
pass
class UnicodeSlicing(Test):
version = 2.0
operations = 5 * 7
rounds = 140000
def test(self):
s = unicode(join(map(str,range(100))))
for i in xrange(self.rounds):
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
s[50:]
s[:25]
s[50:55]
s[-1:]
s[:1]
s[2:]
s[11:-11]
def calibrate(self):
s = unicode(join(map(str,range(100))))
for i in xrange(self.rounds):
pass
### String methods
class UnicodeMappings(Test):
version = 2.0
operations = 3 * (5 + 4 + 2 + 1)
rounds = 10000
def test(self):
s = join(map(unichr,range(20)),'')
t = join(map(unichr,range(100)),'')
u = join(map(unichr,range(500)),'')
v = join(map(unichr,range(1000)),'')
for i in xrange(self.rounds):
s.lower()
s.lower()
s.lower()
s.lower()
s.lower()
s.upper()
s.upper()
s.upper()
s.upper()
s.upper()
s.title()
s.title()
s.title()
s.title()
s.title()
t.lower()
t.lower()
t.lower()
t.lower()
t.upper()
t.upper()
t.upper()
t.upper()
t.title()
t.title()
t.title()
t.title()
u.lower()
u.lower()
u.upper()
u.upper()
u.title()
u.title()
v.lower()
v.upper()
v.title()
def calibrate(self):
s = join(map(unichr,range(20)),'')
t = join(map(unichr,range(100)),'')
u = join(map(unichr,range(500)),'')
v = join(map(unichr,range(1000)),'')
for i in xrange(self.rounds):
pass
class UnicodePredicates(Test):
version = 2.0
operations = 5 * 9
rounds = 120000
def test(self):
data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
len_data = len(data)
for i in xrange(self.rounds):
s = data[i % len_data]
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
s.isalnum()
s.isalpha()
s.isdecimal()
s.isdigit()
s.islower()
s.isnumeric()
s.isspace()
s.istitle()
s.isupper()
def calibrate(self):
data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10)
len_data = len(data)
for i in xrange(self.rounds):
s = data[i % len_data]
try:
import unicodedata
except ImportError:
pass
else:
class UnicodeProperties(Test):
version = 2.0
operations = 5 * 8
rounds = 100000
def test(self):
data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
len_data = len(data)
digit = unicodedata.digit
numeric = unicodedata.numeric
decimal = unicodedata.decimal
category = unicodedata.category
bidirectional = unicodedata.bidirectional
decomposition = unicodedata.decomposition
mirrored = unicodedata.mirrored
combining = unicodedata.combining
for i in xrange(self.rounds):
c = data[i % len_data]
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
digit(c, None)
numeric(c, None)
decimal(c, None)
category(c)
bidirectional(c)
decomposition(c)
mirrored(c)
combining(c)
def calibrate(self):
data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF')
len_data = len(data)
digit = unicodedata.digit
numeric = unicodedata.numeric
decimal = unicodedata.decimal
category = unicodedata.category
bidirectional = unicodedata.bidirectional
decomposition = unicodedata.decomposition
mirrored = unicodedata.mirrored
combining = unicodedata.combining
for i in xrange(self.rounds):
c = data[i % len_data]
| google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/pybench/Unicode.py | Python | apache-2.0 | 11,642 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.text}.
"""
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import text
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTest(unittest.TestCase):
"""
Tests for L{text.greedyWrap}.
"""
def setUp(self):
self.lineWidth = 72
self.sampleSplitText = sampleText.split()
self.output = text.wordWrap(sampleText, self.lineWidth)
def test_wordCount(self):
"""
Compare the number of words.
"""
words = []
for line in self.output:
words.extend(line.split())
wordCount = len(words)
sampleTextWordCount = len(self.sampleSplitText)
self.assertEqual(wordCount, sampleTextWordCount)
def test_wordMatch(self):
"""
Compare the lists of words.
"""
words = []
for line in self.output:
words.extend(line.split())
# Using assertEqual here prints out some
# rather too long lists.
self.failUnless(self.sampleSplitText == words)
def test_lineLength(self):
"""
Check the length of the lines.
"""
failures = []
for line in self.output:
if not len(line) <= self.lineWidth:
failures.append(len(line))
if failures:
self.fail("%d of %d lines were too long.\n"
"%d < %s" % (len(failures), len(self.output),
self.lineWidth, failures))
def test_doubleNewline(self):
"""
Allow paragraphs delimited by two \ns.
"""
sampleText = "et\n\nphone\nhome."
result = text.wordWrap(sampleText, self.lineWidth)
self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTest(unittest.TestCase):
"""
Tests for L{isMultiline} and L{endsInNewline}.
"""
def test_isMultiline(self):
"""
L{text.isMultiline} returns C{True} if the string has a newline in it.
"""
s = 'This code\n "breaks."'
m = text.isMultiline(s)
self.assertTrue(m)
s = 'This code does not "break."'
m = text.isMultiline(s)
self.assertFalse(m)
def test_endsInNewline(self):
"""
L{text.endsInNewline} returns C{True} if the string ends in a newline.
"""
s = 'newline\n'
m = text.endsInNewline(s)
self.assertTrue(m)
s = 'oldline'
m = text.endsInNewline(s)
self.assertFalse(m)
class StringyStringTest(unittest.TestCase):
"""
Tests for L{text.stringyString}.
"""
def test_tuple(self):
"""
Tuple elements are displayed on separate lines.
"""
s = ('a', 'b')
m = text.stringyString(s)
self.assertEqual(m, '(a,\n b,)\n')
def test_dict(self):
"""
Dicts elements are displayed using C{str()}.
"""
s = {'a': 0}
m = text.stringyString(s)
self.assertEqual(m, '{a: 0}')
def test_list(self):
"""
List elements are displayed on separate lines using C{str()}.
"""
s = ['a', 'b']
m = text.stringyString(s)
self.assertEqual(m, '[a,\n b,]\n')
class SplitTest(unittest.TestCase):
"""
Tests for L{text.splitQuoted}.
"""
def test_oneWord(self):
"""
Splitting strings with one-word phrases.
"""
s = 'This code "works."'
r = text.splitQuoted(s)
self.assertEqual(['This', 'code', 'works.'], r)
def test_multiWord(self):
s = 'The "hairy monkey" likes pie.'
r = text.splitQuoted(s)
self.assertEqual(['The', 'hairy monkey', 'likes', 'pie.'], r)
# Some of the many tests that would fail:
#def test_preserveWhitespace(self):
# phrase = '"MANY SPACES"'
# s = 'With %s between.' % (phrase,)
# r = text.splitQuoted(s)
# self.assertEqual(['With', phrase, 'between.'], r)
#def test_escapedSpace(self):
# s = r"One\ Phrase"
# r = text.splitQuoted(s)
# self.assertEqual(["One Phrase"], r)
class StrFileTest(unittest.TestCase):
def setUp(self):
self.io = StringIO("this is a test string")
def tearDown(self):
pass
def test_1_f(self):
self.assertEqual(False, text.strFile("x", self.io))
def test_1_1(self):
self.assertEqual(True, text.strFile("t", self.io))
def test_1_2(self):
self.assertEqual(True, text.strFile("h", self.io))
def test_1_3(self):
self.assertEqual(True, text.strFile("i", self.io))
def test_1_4(self):
self.assertEqual(True, text.strFile("s", self.io))
def test_1_5(self):
self.assertEqual(True, text.strFile("n", self.io))
def test_1_6(self):
self.assertEqual(True, text.strFile("g", self.io))
def test_3_1(self):
self.assertEqual(True, text.strFile("thi", self.io))
def test_3_2(self):
self.assertEqual(True, text.strFile("his", self.io))
def test_3_3(self):
self.assertEqual(True, text.strFile("is ", self.io))
def test_3_4(self):
self.assertEqual(True, text.strFile("ing", self.io))
def test_3_f(self):
self.assertEqual(False, text.strFile("bla", self.io))
def test_large_1(self):
self.assertEqual(True, text.strFile("this is a test", self.io))
def test_large_2(self):
self.assertEqual(True, text.strFile("is a test string", self.io))
def test_large_f(self):
self.assertEqual(False, text.strFile("ds jhfsa k fdas", self.io))
def test_overlarge_f(self):
self.assertEqual(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))
def test_self(self):
self.assertEqual(True, text.strFile("this is a test string", self.io))
def test_insensitive(self):
self.assertEqual(True, text.strFile("ThIs is A test STRING", self.io, False))
| skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/test/test_text.py | Python | gpl-2.0 | 6,494 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflow_v2.types import session_entity_type
from google.cloud.dialogflow_v2.types import (
session_entity_type as gcd_session_entity_type,
)
from google.protobuf import empty_pb2 # type: ignore
from .base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
class SessionEntityTypesGrpcTransport(SessionEntityTypesTransport):
"""gRPC backend transport for SessionEntityTypes.
Service for managing
[SessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityType].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_session_entity_types(
self,
) -> Callable[
[session_entity_type.ListSessionEntityTypesRequest],
session_entity_type.ListSessionEntityTypesResponse,
]:
r"""Return a callable for the list session entity types method over gRPC.
Returns the list of all session entity types in the
specified session.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.ListSessionEntityTypesRequest],
~.ListSessionEntityTypesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_session_entity_types" not in self._stubs:
self._stubs["list_session_entity_types"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/ListSessionEntityTypes",
request_serializer=session_entity_type.ListSessionEntityTypesRequest.serialize,
response_deserializer=session_entity_type.ListSessionEntityTypesResponse.deserialize,
)
return self._stubs["list_session_entity_types"]
@property
def get_session_entity_type(
self,
) -> Callable[
[session_entity_type.GetSessionEntityTypeRequest],
session_entity_type.SessionEntityType,
]:
r"""Return a callable for the get session entity type method over gRPC.
Retrieves the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.GetSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_session_entity_type" not in self._stubs:
self._stubs["get_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/GetSessionEntityType",
request_serializer=session_entity_type.GetSessionEntityTypeRequest.serialize,
response_deserializer=session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["get_session_entity_type"]
@property
def create_session_entity_type(
self,
) -> Callable[
[gcd_session_entity_type.CreateSessionEntityTypeRequest],
gcd_session_entity_type.SessionEntityType,
]:
r"""Return a callable for the create session entity type method over gRPC.
Creates a session entity type.
If the specified session entity type already exists,
overrides the session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.CreateSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_session_entity_type" not in self._stubs:
self._stubs["create_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/CreateSessionEntityType",
request_serializer=gcd_session_entity_type.CreateSessionEntityTypeRequest.serialize,
response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["create_session_entity_type"]
@property
def update_session_entity_type(
self,
) -> Callable[
[gcd_session_entity_type.UpdateSessionEntityTypeRequest],
gcd_session_entity_type.SessionEntityType,
]:
r"""Return a callable for the update session entity type method over gRPC.
Updates the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.UpdateSessionEntityTypeRequest],
~.SessionEntityType]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_session_entity_type" not in self._stubs:
self._stubs["update_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/UpdateSessionEntityType",
request_serializer=gcd_session_entity_type.UpdateSessionEntityTypeRequest.serialize,
response_deserializer=gcd_session_entity_type.SessionEntityType.deserialize,
)
return self._stubs["update_session_entity_type"]
@property
def delete_session_entity_type(
self,
) -> Callable[
[session_entity_type.DeleteSessionEntityTypeRequest], empty_pb2.Empty
]:
r"""Return a callable for the delete session entity type method over gRPC.
Deletes the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Returns:
Callable[[~.DeleteSessionEntityTypeRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_session_entity_type" not in self._stubs:
self._stubs["delete_session_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2.SessionEntityTypes/DeleteSessionEntityType",
request_serializer=session_entity_type.DeleteSessionEntityTypeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_session_entity_type"]
def close(self):
self.grpc_channel.close()
__all__ = ("SessionEntityTypesGrpcTransport",)
| googleapis/python-dialogflow | google/cloud/dialogflow_v2/services/session_entity_types/transports/grpc.py | Python | apache-2.0 | 18,035 |
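A short usage sketch of how a transport like the one above is typically handed to the primary client, per the class docstring; the project and session names are placeholders, and credentials are assumed to come from the environment (Application Default Credentials).

from google.cloud.dialogflow_v2 import SessionEntityTypesClient
from google.cloud.dialogflow_v2.services.session_entity_types.transports.grpc import (
    SessionEntityTypesGrpcTransport,
)

# Build the gRPC transport explicitly, then give it to the primary client.
transport = SessionEntityTypesGrpcTransport(host="dialogflow.googleapis.com")
client = SessionEntityTypesClient(transport=transport)

# e.g. list session entity types for a session (resource name is a placeholder):
# for entity_type in client.list_session_entity_types(
#         parent="projects/my-project/agent/sessions/my-session"):
#     print(entity_type.name)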
#!/usr/bin/env python
import sys
import yaml
from PIL import Image
import math
def find_bounds(map_image):
x_min = map_image.size[0]
x_end = 0
y_min = map_image.size[1]
y_end = 0
pix = map_image.load()
for x in range(map_image.size[0]):
for y in range(map_image.size[1]):
val = pix[x, y]
if val != 205: # not unknown
x_min = min(x, x_min)
x_end = max(x, x_end)
y_min = min(y, y_min)
y_end = max(y, y_end)
return x_min, x_end, y_min, y_end
def computed_cropped_origin(map_image, bounds, resolution, origin):
""" Compute the image for the cropped map when map_image is cropped by bounds and had origin before. """
ox = origin[0]
oy = origin[1]
oth = origin[2]
# First figure out the delta we have to translate from the lower left corner (which is the origin)
# in the image system
dx = bounds[0] * resolution
dy = (map_image.size[1] - bounds[3]) * resolution
# Next rotate this by the theta and add to the old origin
new_ox = ox + dx * math.cos(oth) - dy * math.sin(oth)
new_oy = oy + dx * math.sin(oth) + dy * math.cos(oth)
return [new_ox, new_oy, oth]
if __name__ == "__main__":
if len(sys.argv) < 2:
print >> sys.stderr, "Usage: %s map.yaml [cropped.yaml]" % sys.argv[0]
sys.exit(1)
with open(sys.argv[1]) as f:
map_data = yaml.safe_load(f)
if len(sys.argv) > 2:
crop_name = sys.argv[2]
if crop_name.endswith(".yaml"):
crop_name = crop_name[:-5]
crop_yaml = crop_name + ".yaml"
crop_image = crop_name + ".pgm"
else:
crop_yaml = "cropped.yaml"
crop_image = "cropped.pgm"
map_image_file = map_data["image"]
resolution = map_data["resolution"]
origin = map_data["origin"]
map_image = Image.open(map_image_file)
bounds = find_bounds(map_image)
# left, upper, right, lower
cropped_image = map_image.crop((bounds[0], bounds[2], bounds[1] + 1, bounds[3] + 1))
cropped_image.save(crop_image)
map_data["image"] = crop_image
map_data["origin"] = computed_cropped_origin(map_image, bounds, resolution, origin)
with open(crop_yaml, "w") as f:
yaml.dump(map_data, f)
| OSUrobotics/long-term-mapping | timemap_server/scripts/crop_map.py | Python | gpl-2.0 | 2,296 |
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from scipy.fftpack import fft, ifft, rfft, irfft
from .utils import sizeof_fmt, logger, get_config, warn, _explain_exception
# Support CUDA for FFTs; requires scikits.cuda and pycuda
_cuda_capable = False
_multiply_inplace_c128 = _halve_c128 = _real_c128 = None
def _get_cudafft():
"""Helper to deal with scikit-cuda namespace change"""
try:
from skcuda import fft
except ImportError:
try:
from scikits.cuda import fft
except ImportError:
fft = None
return fft
def get_cuda_memory():
"""Get the amount of free memory for CUDA operations
Returns
-------
memory : str
The amount of available memory as a human-readable string.
"""
if not _cuda_capable:
warn('CUDA not enabled, returning zero for memory')
mem = 0
else:
from pycuda.driver import mem_get_info
mem = mem_get_info()[0]
return sizeof_fmt(mem)
def init_cuda(ignore_config=False):
"""Initialize CUDA functionality
This function attempts to load the necessary interfaces
(hardware connectivity) to run CUDA-based filtering. This
function should only need to be run once per session.
If the config var (set via mne.set_config or in ENV)
MNE_USE_CUDA == 'true', this function will be executed when
the first CUDA setup is performed. If this variable is not
set, this function can be manually executed.
"""
global _cuda_capable, _multiply_inplace_c128, _halve_c128, _real_c128
if _cuda_capable:
return
if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() !=
'true'):
logger.info('CUDA not enabled in config, skipping initialization')
return
# Triage possible errors for informative messaging
_cuda_capable = False
try:
from pycuda import gpuarray, driver # noqa
from pycuda.elementwise import ElementwiseKernel
except ImportError:
warn('module pycuda not found, CUDA not enabled')
return
try:
# Initialize CUDA; happens with importing autoinit
import pycuda.autoinit # noqa
except ImportError:
warn('pycuda.autoinit could not be imported, likely a hardware error, '
'CUDA not enabled%s' % _explain_exception())
return
# Make sure scikit-cuda is installed
cudafft = _get_cudafft()
if cudafft is None:
warn('module scikit-cuda not found, CUDA not enabled')
return
# let's construct our own CUDA multiply in-place function
_multiply_inplace_c128 = ElementwiseKernel(
'pycuda::complex<double> *a, pycuda::complex<double> *b',
'b[i] *= a[i]', 'multiply_inplace')
_halve_c128 = ElementwiseKernel(
'pycuda::complex<double> *a', 'a[i] /= 2.0', 'halve_value')
_real_c128 = ElementwiseKernel(
'pycuda::complex<double> *a', 'a[i] = real(a[i])', 'real_value')
# Make sure we can use 64-bit FFTs
try:
cudafft.Plan(16, np.float64, np.complex128) # will get auto-GC'ed
except Exception:
warn('Device does not appear to support 64-bit FFTs, CUDA not '
'enabled%s' % _explain_exception())
return
_cuda_capable = True
# Figure out limit for CUDA FFT calculations
logger.info('Enabling CUDA with %s available memory' % get_cuda_memory())
###############################################################################
# Repeated FFT multiplication
def setup_cuda_fft_multiply_repeated(n_jobs, h_fft):
"""Set up repeated CUDA FFT multiplication with a given filter
Parameters
----------
n_jobs : int | str
If n_jobs == 'cuda', the function will attempt to set up for CUDA
FFT multiplication.
h_fft : array
The filtering function that will be used repeatedly.
If n_jobs='cuda', this function will be shortened (since CUDA
assumes FFTs of real signals are half the length of the signal)
and turned into a gpuarray.
Returns
-------
n_jobs : int
Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
original n_jobs is passed.
cuda_dict : dict
Dictionary with the following CUDA-related variables:
use_cuda : bool
Whether CUDA should be used.
fft_plan : instance of FFTPlan
FFT plan to use in calculating the FFT.
ifft_plan : instance of FFTPlan
FFT plan to use in calculating the IFFT.
x_fft : instance of gpuarray
Empty allocated GPU space for storing the result of the
frequency-domain multiplication.
x : instance of gpuarray
Empty allocated GPU space for the data to filter.
h_fft : array | instance of gpuarray
This will either be a gpuarray (if CUDA enabled) or np.ndarray.
If CUDA is enabled, h_fft will be modified appropriately for use
with filter.fft_multiply().
Notes
-----
This function is designed to be used with fft_multiply_repeated().
"""
cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
x_fft=None, x=None)
n_fft = len(h_fft)
cuda_fft_len = int((n_fft - (n_fft % 2)) / 2 + 1)
if n_jobs == 'cuda':
n_jobs = 1
init_cuda()
if _cuda_capable:
from pycuda import gpuarray
cudafft = _get_cudafft()
# set up all arrays necessary for CUDA
# try setting up for float64
try:
# do the IFFT normalization now so we don't have to later
h_fft = gpuarray.to_gpu(h_fft[:cuda_fft_len]
.astype('complex_') / len(h_fft))
cuda_dict.update(
use_cuda=True,
fft_plan=cudafft.Plan(n_fft, np.float64, np.complex128),
ifft_plan=cudafft.Plan(n_fft, np.complex128, np.float64),
x_fft=gpuarray.empty(cuda_fft_len, np.complex128),
x=gpuarray.empty(int(n_fft), np.float64))
logger.info('Using CUDA for FFT FIR filtering')
except Exception as exp:
logger.info('CUDA not used, could not instantiate memory '
'(arrays may be too large: "%s"), falling back to '
'n_jobs=1' % str(exp))
else:
logger.info('CUDA not used, CUDA could not be initialized, '
'falling back to n_jobs=1')
return n_jobs, cuda_dict, h_fft
def fft_multiply_repeated(h_fft, x, cuda_dict=dict(use_cuda=False)):
"""Do FFT multiplication by a filter function (possibly using CUDA)
Parameters
----------
h_fft : 1-d array or gpuarray
The filtering array to apply.
x : 1-d array
The array to filter.
cuda_dict : dict
Dictionary constructed using setup_cuda_multiply_repeated().
Returns
-------
x : 1-d array
Filtered version of x.
"""
if not cuda_dict['use_cuda']:
# do the fourier-domain operations
x = np.real(ifft(h_fft * fft(x), overwrite_x=True)).ravel()
else:
cudafft = _get_cudafft()
# do the fourier-domain operations, results in second param
cuda_dict['x'].set(x.astype(np.float64))
cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
_multiply_inplace_c128(h_fft, cuda_dict['x_fft'])
# If we wanted to do it locally instead of using our own kernel:
# cuda_seg_fft.set(cuda_seg_fft.get() * h_fft)
cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
cuda_dict['ifft_plan'], False)
x = np.array(cuda_dict['x'].get(), dtype=x.dtype, subok=True,
copy=False)
return x
###############################################################################
# FFT Resampling
def setup_cuda_fft_resample(n_jobs, W, new_len):
"""Set up CUDA FFT resampling
Parameters
----------
n_jobs : int | str
If n_jobs == 'cuda', the function will attempt to set up for CUDA
FFT resampling.
W : array
The filtering function to be used during resampling.
If n_jobs='cuda', this function will be shortened (since CUDA
assumes FFTs of real signals are half the length of the signal)
and turned into a gpuarray.
new_len : int
The size of the array following resampling.
Returns
-------
n_jobs : int
Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
original n_jobs is passed.
cuda_dict : dict
Dictionary with the following CUDA-related variables:
use_cuda : bool
Whether CUDA should be used.
fft_plan : instance of FFTPlan
FFT plan to use in calculating the FFT.
ifft_plan : instance of FFTPlan
FFT plan to use in calculating the IFFT.
x_fft : instance of gpuarray
Empty allocated GPU space for storing the result of the
frequency-domain multiplication.
x : instance of gpuarray
Empty allocated GPU space for the data to resample.
W : array | instance of gpuarray
This will either be a gpuarray (if CUDA enabled) or np.ndarray.
If CUDA is enabled, W will be modified appropriately for use
with filter.fft_multiply().
Notes
-----
This function is designed to be used with fft_resample().
"""
cuda_dict = dict(use_cuda=False, fft_plan=None, ifft_plan=None,
x_fft=None, x=None, y_fft=None, y=None)
n_fft_x, n_fft_y = len(W), new_len
cuda_fft_len_x = int((n_fft_x - (n_fft_x % 2)) // 2 + 1)
cuda_fft_len_y = int((n_fft_y - (n_fft_y % 2)) // 2 + 1)
if n_jobs == 'cuda':
n_jobs = 1
init_cuda()
if _cuda_capable:
# try setting up for float64
from pycuda import gpuarray
cudafft = _get_cudafft()
try:
# do the IFFT normalization now so we don't have to later
W = gpuarray.to_gpu(W[:cuda_fft_len_x]
.astype('complex_') / n_fft_y)
cuda_dict.update(
use_cuda=True,
fft_plan=cudafft.Plan(n_fft_x, np.float64, np.complex128),
ifft_plan=cudafft.Plan(n_fft_y, np.complex128, np.float64),
x_fft=gpuarray.zeros(max(cuda_fft_len_x,
cuda_fft_len_y), np.complex128),
x=gpuarray.empty(max(int(n_fft_x),
int(n_fft_y)), np.float64))
logger.info('Using CUDA for FFT resampling')
except Exception:
logger.info('CUDA not used, could not instantiate memory '
'(arrays may be too large), falling back to '
'n_jobs=1')
else:
logger.info('CUDA not used, CUDA could not be initialized, '
'falling back to n_jobs=1')
return n_jobs, cuda_dict, W
def fft_resample(x, W, new_len, npads, to_removes,
cuda_dict=dict(use_cuda=False)):
"""Do FFT resampling with a filter function (possibly using CUDA)
Parameters
----------
x : 1-d array
The array to resample. Will be converted to float64 if necessary.
W : 1-d array or gpuarray
The filtering function to apply.
new_len : int
The size of the output array (before removing padding).
npads : tuple of int
Amount of padding to apply to the start and end of the
signal before resampling.
to_removes : tuple of int
Number of samples to remove after resampling.
cuda_dict : dict
Dictionary constructed using setup_cuda_multiply_repeated().
Returns
-------
x : 1-d array
Filtered version of x.
"""
# add some padding at beginning and end to make this work a little cleaner
if x.dtype != np.float64:
x = x.astype(np.float64)
x = _smart_pad(x, npads)
old_len = len(x)
shorter = new_len < old_len
if not cuda_dict['use_cuda']:
N = int(min(new_len, old_len))
# The below is equivalent to this, but faster
# sl_1 = slice((N + 1) // 2)
# y_fft = np.zeros(new_len, np.complex128)
# x_fft = fft(x).ravel() * W
# y_fft[sl_1] = x_fft[sl_1]
# sl_2 = slice(-(N - 1) // 2, None)
# y_fft[sl_2] = x_fft[sl_2]
# y = np.real(ifft(y_fft, overwrite_x=True)).ravel()
x_fft = rfft(x).ravel()
x_fft *= W[np.arange(1, len(x) + 1) // 2].real
y_fft = np.zeros(new_len, np.float64)
sl_1 = slice(N)
y_fft[sl_1] = x_fft[sl_1]
if min(new_len, old_len) % 2 == 0:
if new_len > old_len:
y_fft[N - 1] /= 2.
y = irfft(y_fft, overwrite_x=True).ravel()
else:
cudafft = _get_cudafft()
cuda_dict['x'].set(np.concatenate((x, np.zeros(max(new_len - old_len,
0), x.dtype))))
# do the fourier-domain operations, results put in second param
cudafft.fft(cuda_dict['x'], cuda_dict['x_fft'], cuda_dict['fft_plan'])
_multiply_inplace_c128(W, cuda_dict['x_fft'])
# This is not straightforward, but because x_fft and y_fft share
# the same data (and only one half of the full DFT is stored), we
# don't have to transfer the slice like we do in scipy. All we
# need to worry about is the Nyquist component, either halving it
# or taking just the real component...
use_len = new_len if shorter else old_len
func = _real_c128 if shorter else _halve_c128
if use_len % 2 == 0:
nyq = int((use_len - (use_len % 2)) // 2)
func(cuda_dict['x_fft'], slice=slice(nyq, nyq + 1))
cudafft.ifft(cuda_dict['x_fft'], cuda_dict['x'],
cuda_dict['ifft_plan'], scale=False)
y = cuda_dict['x'].get()[:new_len if shorter else None]
# now let's trim it back to the correct size (if there was padding)
if (to_removes > 0).any():
keep = np.ones((new_len), dtype='bool')
keep[:to_removes[0]] = False
keep[-to_removes[1]:] = False
y = np.compress(keep, y)
return y
###############################################################################
# Misc
# this has to go in mne.cuda instead of mne.filter to avoid import errors
def _smart_pad(x, n_pad):
"""Pad vector x
"""
if (n_pad == 0).all():
return x
elif (n_pad < 0).any():
raise RuntimeError('n_pad must be non-negative')
# need to pad with zeros if len(x) <= npad
l_z_pad = np.zeros(max(n_pad[0] - len(x) + 1, 0), dtype=x.dtype)
    r_z_pad = np.zeros(max(n_pad[1] - len(x) + 1, 0), dtype=x.dtype)  # right pad sized from n_pad[1]
return np.concatenate([l_z_pad, 2 * x[0] - x[n_pad[0]:0:-1], x,
2 * x[-1] - x[-2:-n_pad[1] - 2:-1], r_z_pad])
| jniediek/mne-python | mne/cuda.py | Python | bsd-3-clause | 15,317 |
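A brief usage sketch of the repeated-multiplication helpers defined above, based only on their docstrings; the signal, kernel, and sizes are illustrative assumptions, and the call falls back to plain scipy FFTs when CUDA is unavailable.

import numpy as np
from scipy.fftpack import fft
from mne.cuda import setup_cuda_fft_multiply_repeated, fft_multiply_repeated

n_fft = 1024
x = np.random.randn(n_fft)        # signal to filter (illustrative)
h = np.zeros(n_fft)
h[:64] = 1.0 / 64                 # a crude FIR kernel, zero-padded to n_fft
h_fft = fft(h)                    # frequency response reused for every call

# 'cuda' requests GPU FFTs; n_jobs comes back as 1 and cuda_dict records whether CUDA is in use.
n_jobs, cuda_dict, h_fft = setup_cuda_fft_multiply_repeated('cuda', h_fft)
y = fft_multiply_repeated(h_fft, x, cuda_dict)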
class Queue:
"""Stack class represents a first-in-first-out (FIFO) stack of objects"""
def __init__(self):
self.items = []
# def isEmpty(self):
# code this method such that it returns a boolean when the stack is empty
def enqueue(self, item):
self.items.insert(0, item)
# def dequeue(self):
# code this so that it returns and removes the first entry from the Queue
# in this case it is the last element of the array
def size(self):
return len(self.items)
| sjamcsclub/ROOM-B-CS-Club-Materials | Stacks and Queues/Student Queue.py | Python | gpl-3.0 | 520 |
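A minimal sketch of how the two exercise stubs in the Queue class above could be filled in, following the behaviour described by their comments (dequeue returns and removes the last element of the internal list); this is one possible solution, not part of the course material.

class Queue:
    """Queue class represents a first-in-first-out (FIFO) queue of objects"""
    def __init__(self):
        self.items = []

    def isEmpty(self):
        # True when the queue holds no items
        return len(self.items) == 0

    def enqueue(self, item):
        self.items.insert(0, item)

    def dequeue(self):
        # return and remove the oldest entry, i.e. the last element of the list
        return self.items.pop()

    def size(self):
        return len(self.items)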
import unittest
import epidb_client
from epidb_client import EpiDBClient
from epidb_client.tests import config
class LiveResponseSubmitTestCase(unittest.TestCase):
def setUp(self):
self.client = EpiDBClient(config.api_key)
self.client.server = config.server
self.answers = {'q0000': '0',
'q0001': '1',
'q0002': '2'}
def testSuccess(self):
result = self.client.response_submit(config.user_id,
config.survey_id,
self.answers)
self.assertEqual(result['stat'], 'ok')
class LiveResponseSubmitUnauthorizedTestCase(unittest.TestCase):
def setUp(self):
self.client = EpiDBClient(config.api_key_invalid)
self.client.server = config.server
self.answers = {'q0000': '0',
'q0001': '1',
'q0002': '2'}
def testUnauthorized(self):
try:
self.client.response_submit(config.user_id,
config.survey_id,
self.answers)
self.fail()
except epidb_client.ResponseError, e:
self.assertEqual(e.code, 401)
class GGMResponseTestCase(unittest.TestCase):
def setUp(self):
self.client = EpiDBClient(config.api_key)
self.client.server = config.server
self.answers = {'a20000': '0',
'a21000': '2009-12-15',}
def testSuccess(self):
result = self.client.response_submit(config.user_id,
'dev-response-nl-0.0',
self.answers)
self.assertEqual(result['stat'], 'ok')
if __name__ == '__main__':
unittest.main()
# vim: set ts=4 sts=4 expandtab:
| ISIFoundation/influenzanet-epidb-client | src/epidb_client/tests/test_live_response_submit.py | Python | agpl-3.0 | 1,878 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Burning a candle at both ends.
"""
__version__ = "$Id$"
#end_pymotw_header
import collections
import threading
import time
candle = collections.deque(xrange(11))
def burn(direction, nextSource):
while True:
try:
next = nextSource()
except IndexError:
break
else:
print '%8s: %s' % (direction, next)
time.sleep(0.1)
print '%8s done' % direction
return
left = threading.Thread(target=burn, args=('Left', candle.popleft))
right = threading.Thread(target=burn, args=('Right', candle.pop))
left.start()
right.start()
left.join()
right.join()
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/collections/collections_deque_both_ends.py | Python | gpl-3.0 | 731 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, ConfigParser, tweepy, inspect, hashlib
path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# read config
config = ConfigParser.SafeConfigParser()
config.read(os.path.join(path, "config"))
# your hashtag or search query and tweet language (empty = all languages)
hashtag = config.get("settings","search_query")
tweetLanguage = config.get("settings","tweet_language")
# blacklisted users and words
userBlacklist = []
wordBlacklist = ["RT", u"♺"]
# build savepoint path + file
hashedHashtag = hashlib.md5(hashtag).hexdigest()
last_id_filename = "last_id_hashtag_%s" % hashedHashtag
rt_bot_path = os.path.dirname(os.path.abspath(__file__))
last_id_file = os.path.join(rt_bot_path, last_id_filename)
# create bot
auth = tweepy.OAuthHandler(config.get("twitter","consumer_key"), config.get("twitter","consumer_secret"))
auth.set_access_token(config.get("twitter","access_token"), config.get("twitter","access_token_secret"))
api = tweepy.API(auth)
# retrieve last savepoint if available
try:
with open(last_id_file, "r") as file:
savepoint = file.read()
except IOError:
savepoint = ""
print "No savepoint found. Trying to get as many results as possible."
# search query
timelineIterator = tweepy.Cursor(api.search, q=hashtag, since_id=savepoint, lang=tweetLanguage).items()
# put everything into a list to be able to sort/filter
timeline = []
for status in timelineIterator:
timeline.append(status)
try:
last_tweet_id = timeline[0].id
except IndexError:
last_tweet_id = savepoint
# filter @replies/blacklisted words & users out and reverse timeline
timeline = filter(lambda status: status.text[0] != "@", timeline)
timeline = filter(lambda status: not any(word in status.text.split() for word in wordBlacklist), timeline)
timeline = filter(lambda status: status.author.screen_name not in userBlacklist, timeline)
timeline.reverse()
tw_counter = 0
err_counter = 0
# iterate the timeline and retweet
for status in timeline:
try:
print "(%(date)s) %(name)s: %(message)s\n" % \
{ "date" : status.created_at,
"name" : status.author.screen_name.encode('utf-8'),
"message" : status.text.encode('utf-8') }
api.retweet(status.id)
tw_counter += 1
except tweepy.error.TweepError as e:
# just in case tweet got deleted in the meantime or already retweeted
err_counter += 1
#print e
continue
print "Finished. %d Tweets retweeted, %d errors occured." % (tw_counter, err_counter)
# write last retweeted tweet id to file
with open(last_id_file, "w") as file:
file.write(str(last_tweet_id))
| rmkmahesh/twitter-retweet-bot | retweet.py | Python | mpl-2.0 | 2,615 |
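A sketch of the plain-text "config" file the retweet script reads from its own directory, reconstructed from the config.get(...) calls above (SafeConfigParser INI format); all values are placeholders.

[settings]
search_query = #somehashtag
tweet_language = en

[twitter]
consumer_key = YOUR_CONSUMER_KEY
consumer_secret = YOUR_CONSUMER_SECRET
access_token = YOUR_ACCESS_TOKEN
access_token_secret = YOUR_ACCESS_TOKEN_SECRET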
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Integration tests for run_perf_tests."""
import StringIO
import datetime
import json
import re
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.port.driver import DriverOutput
from webkitpy.port.test import TestPort
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
class EventTargetWrapperTestData:
text = """:Time -> [1486, 1471, 1510, 1505, 1478, 1490] ms
"""
output = """Running Bindings/event-target-wrapper.html (1 of 2)
RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
Finished: 0.1 s
"""
results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}
class SomeParserTestData:
text = """:Time -> [1080, 1120, 1095, 1101, 1104] ms
"""
output = """Running Parser/some-parser.html (2 of 2)
RESULT Parser: some-parser: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
Finished: 0.1 s
"""
results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/some-parser.html',
'metrics': {'Time': {'current': [[1080.0, 1120.0, 1095.0, 1101.0, 1104.0]] * 4}}}
class MemoryTestData:
text = """:Time -> [1080, 1120, 1095, 1101, 1104] ms
:JSHeap -> [825000, 811000, 848000, 837000, 829000] bytes
:Malloc -> [529000, 511000, 548000, 536000, 521000] bytes
"""
output = """Running 1 tests
Running Parser/memory-test.html (1 of 1)
RESULT Parser: memory-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
RESULT Parser: memory-test: JSHeap= 830000.0 bytes
median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
RESULT Parser: memory-test: Malloc= 529000.0 bytes
median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
Finished: 0.1 s
"""
results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
class TestWithSubtestsData:
text = """subtest:Time -> [1, 2, 3, 4, 5] ms
total-test:Time:Total -> [1, 2, 3, 4, 5] ms
total-test/subsubtest:Time -> [1, 2, 3, 4, 5] ms
:Time -> [1080, 1120, 1095, 1101, 1104] ms
"""
output = """Running 1 tests
Running Parser/test-with-subtests.html (1 of 1)
RESULT Parser: test-with-subtests: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
Finished: 0.1 s
"""
results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
'metrics': {'Time': {'current': [[1080.0, 1120.0, 1095.0, 1101.0, 1104.0]] * 4}},
'tests': {
'subtest': {
'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}},
'total-test': {
'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4, "aggregators": ["Total"]}},
'tests': {
'subsubtest':
{'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}}}}}}
class TestDriver:
def run_test(self, driver_input, stop_when_done):
text = ''
timeout = False
crash = False
if driver_input.test_name.endswith('pass.html'):
text = SomeParserTestData.text
elif driver_input.test_name.endswith('timeout.html'):
timeout = True
elif driver_input.test_name.endswith('failed.html'):
text = None
elif driver_input.test_name.endswith('tonguey.html'):
text = 'we are not expecting an output from perf tests but RESULT blablabla'
elif driver_input.test_name.endswith('crash.html'):
crash = True
elif driver_input.test_name.endswith('event-target-wrapper.html'):
text = EventTargetWrapperTestData.text
elif driver_input.test_name.endswith('some-parser.html'):
text = SomeParserTestData.text
elif driver_input.test_name.endswith('memory-test.html'):
text = MemoryTestData.text
elif driver_input.test_name.endswith('test-with-subtests.html'):
text = TestWithSubtestsData.text
return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
def start(self):
"""do nothing"""
def stop(self):
"""do nothing"""
class MainTest(unittest.TestCase):
def _normalize_output(self, log):
return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
def _load_output_json(self, runner):
json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
def create_runner(self, args=[], driver_class=TestDriver):
options, parsed_args = PerfTestsRunner._parse_args(args)
test_port = TestPort(host=MockHost(), options=options)
test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
runner = PerfTestsRunner(args=args, port=test_port)
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
return runner, test_port
def run_test(self, test_name):
runner, port = self.create_runner()
tests = [PerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
return runner._run_tests_set(tests) == 0
def test_run_passing_test(self):
self.assertTrue(self.run_test('pass.html'))
def test_run_silent_test(self):
self.assertFalse(self.run_test('silent.html'))
def test_run_failed_test(self):
self.assertFalse(self.run_test('failed.html'))
def test_run_tonguey_test(self):
self.assertFalse(self.run_test('tonguey.html'))
def test_run_timeout_test(self):
self.assertFalse(self.run_test('timeout.html'))
def test_run_crash_test(self):
self.assertFalse(self.run_test('crash.html'))
def _tests_for_runner(self, runner, test_names):
filesystem = runner._host.filesystem
tests = []
for test in test_names:
path = filesystem.join(runner._base_path, test)
dirname = filesystem.dirname(path)
tests.append(PerfTest(runner._port, test, path))
return tests
def test_run_test_set_kills_drt_per_run(self):
class TestDriverWithStopCount(TestDriver):
stop_count = 0
def stop(self):
TestDriverWithStopCount.stop_count += 1
runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
unexpected_result_count = runner._run_tests_set(tests)
self.assertEqual(TestDriverWithStopCount.stop_count, 9)
def test_run_test_set_for_parser_tests(self):
runner, port = self.create_runner()
tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner._run_tests_set(tests)
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
def test_run_memory_test(self):
runner, port = self.create_runner_and_setup_results_template()
runner._timestamp = 123456789
port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner.run()
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
def test_run_test_with_subtests(self):
runner, port = self.create_runner_and_setup_results_template()
runner._timestamp = 123456789
port.host.filesystem.write_text_file(runner._base_path + '/Parser/test-with-subtests.html', 'some content')
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner.run()
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
self.assertEqual(self._normalize_output(log), TestWithSubtestsData.output + '\nMOCK: user.open_url: file://...\n')
parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
self.maxDiff = None
self.assertEqual(parser_tests['test-with-subtests'], TestWithSubtestsData.results)
def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
filesystem.write_text_file(runner._base_path + '/Parser/some-parser.html', 'some content')
filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
uploaded = [False]
def mock_upload_json(hostname, json_path, host_path=None):
# FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
self.assertIn(hostname, ['some.host'])
self.assertIn(json_path, ['/mock-checkout/output.json'])
self.assertIn(host_path, [None, '/api/report'])
uploaded[0] = upload_succeeds
return upload_succeeds
runner._upload_json = mock_upload_json
runner._timestamp = 123456789
runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
output_capture = OutputCapture()
output_capture.capture_output()
try:
self.assertEqual(runner.run(), expected_exit_code)
finally:
stdout, stderr, logs = output_capture.restore_output()
if not expected_exit_code and compare_logs:
expected_logs = ''
for i in xrange(repeat):
runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + SomeParserTestData.output
if results_shown:
expected_logs += 'MOCK: user.open_url: file://...\n'
self.assertEqual(self._normalize_output(logs), expected_logs)
self.assertEqual(uploaded[0], upload_succeeds)
return logs
_event_target_wrapper_and_inspector_results = {
"Bindings":
{"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings",
"tests": {"event-target-wrapper": EventTargetWrapperTestData.results}},
"Parser":
{"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Parser",
"tests": {"some-parser": SomeParserTestData.results}}}
def test_run_with_json_output(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
filesystem = port.host.filesystem
self.assertTrue(filesystem.isfile(runner._output_json_path()))
self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
def test_run_with_description(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--description', 'some description'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def create_runner_and_setup_results_template(self, args=[]):
runner, port = self.create_runner(args)
filesystem = port.host.filesystem
filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
'<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
return runner, port
def test_run_respects_no_results(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--no-results'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
def test_run_generates_json_by_default(self):
runner, port = self.create_runner_and_setup_results_template()
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
self.assertFalse(filesystem.isfile(output_json_path))
self.assertFalse(filesystem.isfile(results_page_path))
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(output_json_path))
self.assertTrue(filesystem.isfile(results_page_path))
def test_run_merges_output_by_default(self):
runner, port = self.create_runner_and_setup_results_template()
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
def test_run_respects_reset_results(self):
runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
def test_run_generates_and_show_results_page(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
page_shown = []
port.show_results_html_file = lambda path: page_shown.append(path)
filesystem = port.host.filesystem
self._test_run_with_json_output(runner, filesystem, results_shown=False)
expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}
self.maxDiff = None
self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
self.assertEqual(self._load_output_json(runner), [expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
'<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
self.assertEqual(page_shown[0], '/mock-checkout/output.html')
self._test_run_with_json_output(runner, filesystem, results_shown=False)
self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
'<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
def test_run_respects_no_show_results(self):
show_results_html_file = lambda path: page_shown.append(path)
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
page_shown = []
port.show_results_html_file = show_results_html_file
self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
self.assertEqual(page_shown[0], '/mock-checkout/output.html')
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--no-show-results'])
page_shown = []
port.show_results_html_file = show_results_html_file
self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
self.assertEqual(page_shown, [])
def test_run_with_bad_output_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
def test_run_with_slave_config_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
def test_run_with_bad_slave_config_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
def test_run_with_multiple_repositories(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
"some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def test_run_with_upload_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertEqual(generated_json[0]['platform'], 'platform1')
self.assertEqual(generated_json[0]['builderName'], 'builder1')
self.assertEqual(generated_json[0]['buildNumber'], 123)
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
def test_run_with_upload_json_should_generate_perf_webkit_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
'--slave-config-json-path=/mock-checkout/slave-config.json'])
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertTrue(isinstance(generated_json, list))
self.assertEqual(len(generated_json), 1)
output = generated_json[0]
self.maxDiff = None
self.assertEqual(output['platform'], 'platform1')
self.assertEqual(output['buildNumber'], 123)
self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
self.assertEqual(output['builderName'], 'builder1')
self.assertEqual(output['builderKey'], 'value1')
self.assertEqual(output['revisions'], {'WebKit': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
self.assertEqual(output['tests'].keys(), ['Bindings', 'Parser'])
self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
self.assertEqual(output['tests']['Bindings']['url'], 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings')
self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
def test_run_with_repeat(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--repeat', '5'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
self.assertEqual(self._load_output_json(runner), [
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def test_run_with_test_runner_count(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-runner-count=3'])
self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertTrue(isinstance(generated_json, list))
self.assertEqual(len(generated_json), 1)
output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
self.assertEqual(len(output), 3)
expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
for metrics in output:
self.assertEqual(metrics, expectedMetrics)
|
loveyoupeng/rt
|
modules/web/src/main/native/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
|
Python
|
gpl-2.0
| 29,978
|
import numpy as np
from collections import defaultdict
ingredients = ['milk', 'egg', 'sugar', 'salt', 'flour']
fillings = ['banana', 'strawberry jam', 'chocolate', 'walnut']
cake_ingredients = np.array([8.0, 8.0, 4.0, 1.0, 9.0])
cake_fillings = {
'banana': [1, 0, 0, 0],
'strawberry': [0, 30, 0, 0],
'chocolate': [0, 0, 25, 0],
'walnut': [0, 0, 0, 10],
}
def is_I_good(I):
for i in I:
        if i < 0.0:
return False
return True
for _ in range(input()):
raw_input()
I = np.array(map(float, raw_input().strip().split()))
F = np.array(map(float, raw_input().strip().split()))
MAX = int(min(I/cake_ingredients)*16)
b = int(F[0]/cake_fillings['banana'][0])
s = int(F[1]/cake_fillings['strawberry'][1])
c = int(F[2]/cake_fillings['chocolate'][2])
w = int(F[3]/cake_fillings['walnut'][3])
print(min(MAX, b+s+c+w))
|
tuestudy/ipsc
|
2011/P/pancake-dgoon-easy.py
|
Python
|
mit
| 899
|
import setpath
import functions
import random
# coding: utf-8
import math
import json
from fractions import Fraction
def dummycode(*args):
# if type(args[0]) not in (str,unicode):
# yield args[0]
rid = args[0]
colname = args[1]
val = args[2]
values = json.loads(args[3])
values.pop(0)
yield ("rid","colname", "val")
res = []
for i in xrange(len(values)):
if val == values[i]:
yield (rid,colname+values[i],float(1.0))
else:
yield (rid,colname+values[i],float(0.0))
dummycode.registered = True
def t_distribution_cdf(*args):
from scipy import stats
# colname = args[0]
number = args[0]
degreeOfFreedom = args[1]
# yield ("colname", "valPr")
result = stats.t.cdf(number, degreeOfFreedom)
return result
# yield (colname, result)
t_distribution_cdf.registered = True
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
|
alexpap/exareme
|
exareme-tools/madis/src/functionslocal/row/linearregressionR.py
|
Python
|
mit
| 1,267
|
# Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011-2014 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
import bisect
import occur_regularly
from exceptions import BadRuleError
_RULE_NAMES = {'local': 'occur_regularly_group_local',
'UTC': 'occur_regularly_group_UTC'}
def make_rule(refstart, interval, rstarts, rend, ralarm, standard, guiconfig):
"""
@param refstart: A sample Unix start time of the first occurrence in a
group.
@param interval: The interval in seconds between the start times of the
first occurrences of two consecutive groups.
@param rstarts: A tuple storing the positive differences in seconds between
the start time of the first occurrence and that of each
occurrence in a group. It must contain at least 0, which
corresponds to the first occurrence of the group.
@param rend: The positive difference in seconds between an occurrence's
start and end times.
@param ralarm: The difference in seconds between an occurrence's start and
alarm times; it is negative if the alarm is set later than
the start time.
@param standard: The time standard to be used, either 'local' or 'UTC'.
@param guiconfig: A place to store any configuration needed only by the
interface.
"""
# Make sure this rule can only produce occurrences compliant with the
# requirements defined in organism_api.update_item_rules
# There's no need to check standard because it's imposed by the API
if isinstance(refstart, int) and refstart >= 0 and \
isinstance(interval, int) and interval > 0 and \
isinstance(rstarts, list) and 0 in rstarts and \
(rend is None or (isinstance(rend, int) and rend > 0)) and \
(ralarm is None or isinstance(ralarm, int)):
refstarts = []
for rstart in rstarts:
if isinstance(rstart, int) and rstart >= 0:
refstarts.append(refstart + rstart)
else:
raise BadRuleError()
# Also take a possible negative (late) alarm time into account, in fact
# the occurrence wouldn't be found if the search range included the
# alarm time but not the actual occurrence time span; remember that
# it's normal that the occurrence is not added to the results if the
# search range is between (and doesn't include) the alarm time and the
# actual occurrence time span
if ralarm is None:
rmax = max((rend, 0))
else:
rmax = max((rend, ralarm * -1, 0))
overlaps = rmax // interval
bgap = interval - rmax % interval
return {
'rule': _RULE_NAMES[standard],
'#': (
refstarts,
interval,
overlaps,
bgap,
rend,
ralarm,
guiconfig,
)
}
else:
raise BadRuleError()
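# A minimal usage sketch of make_rule(); the timestamps and intervals below are
# illustrative assumptions, not values from the Outspline code base. They
# describe a daily group whose second occurrence starts two hours after the
# first, with each occurrence lasting 30 minutes:
#
#   rule = make_rule(refstart=1388570400, interval=86400, rstarts=[0, 7200],
#                    rend=1800, ralarm=None, standard='UTC', guiconfig=None)
#   # rule['rule'] == 'occur_regularly_group_UTC'
#   # rule['#'][0] == [1388570400, 1388577600]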
def get_occurrences_range_local(mint, utcmint, maxt, utcoffset, filename, id_,
rule, occs):
for refstart in rule['#'][0]:
srule = rule.copy()
srule['#'][0] = refstart
occur_regularly.get_occurrences_range_local(mint, utcmint, maxt,
utcoffset, filename, id_, srule, occs)
def get_occurrences_range_UTC(mint, utcmint, maxt, utcoffset, filename, id_,
rule, occs):
for refstart in rule['#'][0]:
srule = rule.copy()
srule['#'][0] = refstart
occur_regularly.get_occurrences_range_UTC(mint, utcmint, maxt,
utcoffset, filename, id_, srule, occs)
def get_next_item_occurrences_local(base_time, utcbase, utcoffset, filename,
id_, rule, occs):
for refstart in rule['#'][0]:
srule = rule.copy()
srule['#'][0] = refstart
occur_regularly.get_next_item_occurrences_local(base_time, utcbase,
utcoffset, filename, id_, srule, occs)
def get_next_item_occurrences_UTC(base_time, utcbase, utcoffset, filename,
id_, rule, occs):
for refstart in rule['#'][0]:
srule = rule.copy()
srule['#'][0] = refstart
occur_regularly.get_next_item_occurrences_UTC(base_time, utcbase,
utcoffset, filename, id_, srule, occs)
|
xguse/outspline
|
src/outspline/extensions/organism_basicrules/occur_regularly_group.py
|
Python
|
gpl-3.0
| 5,426
|
import re
from jabbapylib.podium import podium
from jabbapylib.filesystem import ini
def test_read_ini():
ini_file = '{home}/.mozilla/firefox/profiles.ini'.format(home=podium.get_home_dir())
path = ini.read_ini('Profile0', ini_file)['path']
    assert re.search(r'.{8}\.default', path)
|
jabbalaci/jabbapylib
|
tests/filesystem/test_ini.py
|
Python
|
gpl-3.0
| 293
|
import optparse
from os import curdir
from os.path import abspath
import sys
from autoscalebot.tasks import start_autoscaler
from autoscalebot import version
def main(args=sys.argv[1:]):
CLI_ROOT = abspath(curdir)
sys.path.insert(0, CLI_ROOT)
parser = optparse.OptionParser(
usage="%prog or type %prog -h (--help) for help",
version=version
)
parser.add_option("--settings",
dest="settings",
default=None,
type="string",
help='settings to use when autoscaling')
options, args = parser.parse_args()
if options.settings:
settings = __import__(options.settings)
start_autoscaler(settings=settings)
|
wieden-kennedy/autoscalebot
|
autoscalebot/cli.py
|
Python
|
bsd-3-clause
| 747
|
"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
"""
# from __future__ import print_function, division
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def iou_binary(preds, labels, EMPTY=1.0, ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / float(union)
ious.append(iou)
    iou = mean(ious)  # mean across images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1.0, ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if (
i != ignore
): # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / float(union))
ious.append(iou)
    ious = [mean(iou) for iou in zip(*ious)]  # mean across images if per_image
return 100 * np.array(ious)
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(
lovasz_hinge_flat(
*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)
)
for log, lab in zip(logits, labels)
)
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
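# Illustrative usage sketch for the binary loss (tensor shapes and the ignore
# index are assumptions, not part of the original file):
#
#   logits = model(images)                     # [B, H, W] raw scores
#   loss = lovasz_hinge(logits, masks, per_image=True, ignore=255)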
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
class StableBCELoss(torch.nn.modules.Module):
def __init__(self):
super(StableBCELoss, self).__init__()
def forward(self, input, target):
neg_abs = -input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.mean()
def binary_xloss(logits, labels, ignore=None):
"""
Binary Cross entropy loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
ignore: void class id
"""
logits, labels = flatten_binary_scores(logits, labels, ignore)
loss = StableBCELoss()(logits, Variable(labels.float()))
return loss
# --------------------------- MULTICLASS LOSSES ---------------------------
def lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = mean(
lovasz_softmax_flat(
*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
classes=classes
)
for prob, lab in zip(probas, labels)
)
else:
loss = lovasz_softmax_flat(
*flatten_probas(probas, labels, ignore), classes=classes
)
return loss
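# Illustrative usage sketch for the multi-class loss (the softmax call, tensor
# shapes and the ignore index are assumptions, not part of the original file):
#
#   probas = F.softmax(model(images), dim=1)   # [B, C, H, W] probabilities
#   loss = lovasz_softmax(probas, masks, classes='present', ignore=255)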
def lovasz_softmax_flat(probas, labels, classes="present"):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.0
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if classes is "present" and fg.sum() == 0:
continue
if C == 1:
if len(classes) > 1:
raise ValueError("Sigmoid output possible only with 1 class")
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = labels != ignore
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
def xloss(logits, labels, ignore=None):
"""
Cross entropy loss
"""
return F.cross_entropy(logits, Variable(labels), ignore_index=255)
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == "raise":
raise ValueError("Empty mean")
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
|
Diyago/Machine-Learning-scripts
|
DEEP LEARNING/segmentation/Understanding-Clouds-from-Satellite-Images-master/losses/lovasz_losses.py
|
Python
|
apache-2.0
| 8,487
|
import crilib.repositories
import crilib.packaging
import crilib.server
serv = crilib.server.Server("server.yml")
ldr = crilib.packaging.PackageLoader()
pkg = crilib.repositories.PackageMeta("minecraft-vanilla", "MC1.11.2")
ldr.init_pkg(pkg)
ictx = crilib.packaging.InstallContext(pkg, serv)
bundle = ldr.find_inited_package("minecraft-vanilla")
bundle.module.install(ictx)
for p in ictx.requests:
p.install()
|
treyzania/Craftitizer
|
cri/test.py
|
Python
|
mit
| 416
|
# DESCRIPTION: Tests the performance of the engine.
# 4920646f6e5c2774206361726520696620697420776f726b73206f6e20796f7572206d61636869
# 6e652120576520617265206e6f74207368697070696e6720796f7572206d616368696e6521
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import time, unittest
from lib import chessboard, core, engine, movegenerator, pieces, usercontrol
class Timer(object):
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print 'elapsed time: %f ms' % self.msecs
class TestMoveGenerator(unittest.TestCase):
"""Tests the performance of the move generator."""
def setUp(self):
self.numberofloops = 10
self.board = chessboard.ChessBoard()
self.board[27] = pieces.KingPiece('white')
self.board[45] = pieces.KingPiece('black')
self.generator = movegenerator.MoveGenerator(self.board)
return None
def test_basicmoves(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.basicmoves('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_pawnpushmoves(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.pawnpushmoves('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_pawncapturemoves(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.pawncapturemoves('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_castlemoves(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.castlemoves('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_enpassantmoves(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.enpassantmoves('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_onlylegalmoves(self):
moves = self.generator.basicmoves('white')
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.onlylegalmoves('white', moves)
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_illegalmove(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.illegalmove((27, 36), 'white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_kingincheck(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.kingincheck('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_generatemovelist(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.generator.generatemovelist('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_initialise_and_generate(self):
with Timer() as t:
for x in xrange(self.numberofloops):
movegenerator.MoveGenerator(self.board).generatemovelist('white')
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
class TestChessboard(unittest.TestCase):
"""Tests the performance of the chessboard."""
def setUp(self):
self.numberofloops = 10
self.board = chessboard.ChessBoard()
self.board.setupnormalboard()
return None
def test_duplicateboard(self):
with Timer() as t:
for x in xrange(self.numberofloops):
self.board.duplicateboard()
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
def test_move(self):
with Timer() as t:
for x in xrange(self.numberofloops/2):
self.board.move(12, 28)
self.board.move(28, 12)
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
class TestNodeAndTree(unittest.TestCase):
"""Looks at the node and tree in the recursive search."""
def setUp(self):
self.numberofloops = 10
self.node = engine.Node
return None
@unittest.skip("Under redevelopment.")
def test_node_noparent(self):
# Set up the board state.
state = chessboard.ChessBoard()
state[5] = pieces.RookPiece('white')
# Then time it.
with Timer() as t:
for x in xrange(self.numberofloops):
engine.Node(None, (1, 5), state)
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
@unittest.skip("Under redevelopment.")
def test_node_parent(self):
# Make the parent first.
parentstate = chessboard.ChessBoard()
parentstate[1] = pieces.RookPiece('white')
parent = engine.Node(None, (0, 1), parentstate)
# Set up the board state.
state = chessboard.ChessBoard()
state[5] = pieces.RookPiece('white')
# Then time it.
with Timer() as t:
for x in xrange(self.numberofloops):
engine.Node(parent, (1, 5), state)
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
@unittest.skip("Under redevelopment.")
def test_tree_initialise(self):
with Timer() as t:
for x in xrange(self.numberofloops):
engine.TreeStructure()
print '\n\t=> elapsed time for %i loops: %s s' % (self.numberofloops, t.secs)
return None
@unittest.skip("Under redevelopment.")
def test_tree_addnode(self):
# Initilise Tree
tree = engine.TreeStructure()
# Make the parent first.
parentstate = chessboard.ChessBoard()
parentstate[1] = pieces.RookPiece('white')
parent = engine.Node(None, (0, 1), parentstate)
# Set up the board state.
state = chessboard.ChessBoard()
state[5] = pieces.RookPiece('white')
child = engine.Node(parent, (1, 5), state)
with Timer() as t:
for x in xrange(10000):
tree.addnode(child)
print '\n\t=> elapsed time for 10000 loops: %s s' % t.secs
return None
class TestEngine(unittest.TestCase):
"""Looks at the search and evaluate of the engine and how fast it is."""
def setUp(self):
self.search = engine.ChessEngine()
return None
def test_search(self):
return None
def test_evaluate(self):
return None
if __name__ == '__main__':
unittest.main(verbosity=2)
|
kmiller96/Quark-ChessEngine
|
tests/test_performance.py
|
Python
|
mit
| 7,450
|
"""
This is a simple example on how to use Flask and Asynchronous RPC calls.
I kept this simple, but if you want to use this properly you will need
to expand the concept.
Things that are not included in this example.
- Reconnection strategy.
- Consider implementing utility functionality for checking and getting
responses.
def has_response(correlation_id)
def get_response(correlation_id)
Apache/wsgi configuration.
- Each process you start with apache will create a new connection to
RabbitMQ.
- I would recommend depending on the size of the payload that you have
about 100 threads per process. If the payload is larger, it might be
worth to keep a lower thread count per process.
For questions feel free to email me: me@eandersson.net
"""
__author__ = 'eandersson'
import threading
from time import sleep
from flask import Flask
import amqpstorm
from amqpstorm import Message
app = Flask(__name__)
class RpcClient(object):
"""Asynchronous Rpc client."""
def __init__(self, host, username, password, rpc_queue):
self.queue = {}
self.host = host
self.username = username
self.password = password
self.channel = None
self.connection = None
self.callback_queue = None
self.rpc_queue = rpc_queue
self.open()
def open(self):
"""Open Connection."""
self.connection = amqpstorm.Connection(self.host, self.username,
self.password)
self.channel = self.connection.channel()
self.channel.queue.declare(self.rpc_queue)
result = self.channel.queue.declare(exclusive=True)
self.callback_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.callback_queue)
self._create_process_thread()
def _create_process_thread(self):
"""Create a thread responsible for consuming messages in response
to RPC requests.
"""
thread = threading.Thread(target=self._process_data_events)
thread.setDaemon(True)
thread.start()
def _process_data_events(self):
"""Process Data Events using the Process Thread."""
self.channel.start_consuming(to_tuple=False)
def _on_response(self, message):
"""On Response store the message with the correlation id in a local
dictionary.
"""
self.queue[message.correlation_id] = message.body
def send_request(self, payload):
# Create the Message object.
message = Message.create(self.channel, payload)
message.reply_to = self.callback_queue
# Create an entry in our local dictionary, using the automatically
# generated correlation_id as our key.
self.queue[message.correlation_id] = None
# Publish the RPC request.
message.publish(routing_key=self.rpc_queue)
# Return the Unique ID used to identify the request.
return message.correlation_id
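    # Sketch of the convenience helpers suggested in the module docstring;
    # they are an illustration added here, not part of the original example,
    # and simply wrap the local response dictionary.
    def has_response(self, correlation_id):
        """Return True once a reply for the given correlation id has arrived."""
        return self.queue.get(correlation_id) is not None
    def get_response(self, correlation_id):
        """Return the stored reply body, or None if it has not arrived yet."""
        return self.queue.get(correlation_id)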
@app.route('/rpc_call/<payload>')
def rpc_call(payload):
"""Simple Flask implementation for making asynchronous Rpc calls. """
# Send the request and store the requests Unique ID.
corr_id = RPC_CLIENT.send_request(payload)
# Wait until we have received a response.
while RPC_CLIENT.queue[corr_id] is None:
sleep(0.1)
# Return the response to the user.
return RPC_CLIENT.queue[corr_id]
if __name__ == '__main__':
RPC_CLIENT = RpcClient('127.0.0.1', 'guest', 'guest', 'rpc_queue')
app.run()
|
eandersson/python-rabbitmq-examples
|
Flask-examples/amqpstorm_threaded_rpc_client.py
|
Python
|
gpl-3.0
| 3,624
|
from django import template
register = template.Library()
|
aksh1/wagtail-cookiecutter-foundation
|
{{cookiecutter.repo_name}}/pages/templatetags/pages_tags.py
|
Python
|
mit
| 58
|
# -*- coding: utf-8 -*-
"""
gspread.urls
~~~~~~~~~~~~
Google API urls.
"""
SPREADSHEETS_API_V4_BASE_URL = 'https://sheets.googleapis.com/v4/spreadsheets'
SPREADSHEET_URL = SPREADSHEETS_API_V4_BASE_URL + '/%s'
SPREADSHEET_BATCH_UPDATE_URL = SPREADSHEETS_API_V4_BASE_URL + '/%s:batchUpdate'
SPREADSHEET_VALUES_URL = SPREADSHEETS_API_V4_BASE_URL + '/%s/values/%s'
SPREADSHEET_VALUES_APPEND_URL = SPREADSHEET_VALUES_URL + ':append'
SPREADSHEET_VALUES_CLEAR_URL = SPREADSHEET_VALUES_URL + ':clear'
DRIVE_FILES_API_V2_URL = 'https://www.googleapis.com/drive/v2/files'
DRIVE_FILES_UPLOAD_API_V2_URL = ('https://www.googleapis.com'
'/upload/drive/v2/files')
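# Example of how these templates expand (the spreadsheet id and range below are
# placeholders, not real values):
#   SPREADSHEET_VALUES_URL % ('<spreadsheet_id>', 'Sheet1!A1:B2')
#   -> 'https://sheets.googleapis.com/v4/spreadsheets/<spreadsheet_id>/values/Sheet1!A1:B2'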
|
LukeMurphey/splunk-google-drive
|
src/bin/google_drive_app/gspread/urls.py
|
Python
|
apache-2.0
| 688
|
# Written by Arno Bakker
# Updated by George Milescu
# see LICENSE.txt for license information
""" Simple definitions for the Tribler Core. """
import os
DLSTATUS_ALLOCATING_DISKSPACE = 0 # TODO: make sure this get set when in this alloc mode
DLSTATUS_WAITING4HASHCHECK = 1
DLSTATUS_HASHCHECKING = 2
DLSTATUS_DOWNLOADING = 3
DLSTATUS_SEEDING = 4
DLSTATUS_STOPPED = 5
DLSTATUS_STOPPED_ON_ERROR = 6
DLSTATUS_REPEXING = 7
dlstatus_strings = ['DLSTATUS_ALLOCATING_DISKSPACE',
'DLSTATUS_WAITING4HASHCHECK',
'DLSTATUS_HASHCHECKING',
'DLSTATUS_DOWNLOADING',
'DLSTATUS_SEEDING',
'DLSTATUS_STOPPED',
'DLSTATUS_STOPPED_ON_ERROR',
'DLSTATUS_REPEXING']
UPLOAD = 'up'
DOWNLOAD = 'down'
DLMODE_NORMAL = 0
DLMODE_VOD = 1
DLMODE_SVC = 2 # Ric: added download mode for Scalable Video Coding (SVC)
PERSISTENTSTATE_CURRENTVERSION = 3
"""
V1 = SwarmPlayer 1.0.0
V2 = Tribler 4.5.0: SessionConfig: Added NAT fields
V3 = SessionConfig: Added multicast_local_peer_discovery,
Removed rss_reload_frequency + rss_check_frequency.
For details see API.py
"""
STATEDIR_ITRACKER_DIR = 'itracker'
STATEDIR_DLPSTATE_DIR = 'dlcheckpoints'
STATEDIR_PEERICON_DIR = 'icons'
STATEDIR_TORRENTCOLL_DIR = 'collected_torrent_files'
# 13-04-2010, Andrea: subtitles collecting dir default
STATEDIR_SUBSCOLL_DIR = 'collected_subtitles_files'
STATEDIR_SESSCONFIG = 'sessconfig.pickle'
STATEDIR_SEEDINGMANAGER_DIR = 'seeding_manager_stats'
PROXYSERVICE_DESTDIR = 'proxyservice'
# For observer/callback mechanism, see Session.add_observer()
# subjects
NTFY_PEERS = 'peers'
NTFY_TORRENTS = 'torrents'
NTFY_PREFERENCES = 'preferences'
NTFY_SUPERPEERS = 'superpeers' # use NTFY_PEERS !!
NTFY_FRIENDS = 'friends' # use NTFY_PEERS !!
NTFY_MYPREFERENCES = 'mypreferences' # currently not observable
NTFY_BARTERCAST = 'bartercast' # currently not observable
NTFY_MYINFO = 'myinfo'
NTFY_SEEDINGSTATS = 'seedingstats'
NTFY_SEEDINGSTATSSETTINGS = 'seedingstatssettings'
NTFY_VOTECAST = 'votecast'
NTFY_CHANNELCAST = 'channelcast'
# this corresponds to the event of a peer advertising
# new rich metadata available (for now just subtitles)
NTFY_RICH_METADATA = 'rich_metadata'
# this corresponds to the event of a subtitle file (the actual .srt)
# received from a remote peer
NTFY_SUBTITLE_CONTENTS = 'subtitles_in'
NTFY_SEARCH = 'clicklogsearch' # BuddyCast 4
NTFY_TERM = 'clicklogterm'
NTFY_GAMECAST = 'gamecast'
# non data handler subjects
NTFY_ACTIVITIES = 'activities' # an activity was set (peer met/dns resolved)
NTFY_REACHABLE = 'reachable' # the Session is reachable from the Internet
NTFY_PROXYDOWNLOADER = "proxydownloader" # the proxydownloader object was created
NTFY_PROXYDISCOVERY = "proxydiscovery" # a new proxy was discovered
# ProxyService 90s Test_
NTFY_GUI_STARTED = "guistarted"
# _ProxyService 90s Test
NTFY_DISPERSY = 'dispersy' # an notification regarding dispersy
# changeTypes
NTFY_UPDATE = 'update' # data is updated
NTFY_INSERT = 'insert' # new data is inserted
NTFY_DELETE = 'delete' # data is deleted
NTFY_SEARCH_RESULT = 'search_result' # new search result
NTFY_CONNECTION = 'connection' # connection made or broken
NTFY_STARTED = 'started'
# object IDs for NTFY_ACTIVITIES subject
NTFY_ACT_NONE = 0
NTFY_ACT_UPNP = 1
NTFY_ACT_REACHABLE = 2
NTFY_ACT_GET_EXT_IP_FROM_PEERS = 3
NTFY_ACT_MEET = 4
NTFY_ACT_GOT_METADATA = 5
NTFY_ACT_RECOMMEND = 6
NTFY_ACT_DISK_FULL = 7
NTFY_ACT_NEW_VERSION = 8
NTFY_ACT_ACTIVE = 9
# Disk-allocation policies for download, see DownloadConfig.set_alloc_type
DISKALLOC_NORMAL = 'normal'
DISKALLOC_BACKGROUND = 'background'
DISKALLOC_PREALLOCATE = 'pre-allocate'
DISKALLOC_SPARSE = 'sparse'
# UPnP modes, see SessionConfig.set_upnp_mode
UPNPMODE_DISABLED = 0
UPNPMODE_WIN32_HNetCfg_NATUPnP = 1
UPNPMODE_WIN32_UPnP_UPnPDeviceFinder = 2
UPNPMODE_UNIVERSAL_DIRECT = 3
# Buddycast Collecting Policy parameters
BCCOLPOLICY_SIMPLE = 1
# BCCOLPOLICY_T4T = 2 # Future work
# Internal tracker scrape
ITRACKSCRAPE_ALLOW_NONE = 'none'
ITRACKSCRAPE_ALLOW_SPECIFIC = 'specific'
ITRACKSCRAPE_ALLOW_FULL = 'full'
ITRACKDBFORMAT_BENCODE = 'bencode'
ITRACKDBFORMAT_PICKLE = 'pickle'
ITRACKMULTI_ALLOW_NONE = 'none'
ITRACKMULTI_ALLOW_AUTODETECT = 'autodetect'
ITRACKMULTI_ALLOW_ALL = 'all'
ITRACK_IGNORE_ANNOUNCEIP_NEVER = 0
ITRACK_IGNORE_ANNOUNCEIP_ALWAYS = 1
ITRACK_IGNORE_ANNOUNCEIP_IFNONATCHECK = 2
# ProxyService
PROXYSERVICE_DOE_OBJECT = "doe-obj"
PROXYSERVICE_PROXY_OBJECT = "proxy-obj"
PROXYSERVICE_ROLE_DOE = 'doe-role'
PROXYSERVICE_ROLE_PROXY = 'proxy-role'
PROXYSERVICE_ROLE_NONE = 'none-role'
DOE_MODE_OFF = 0
DOE_MODE_PRIVATE = 1
DOE_MODE_SPEED = 2
PROXYSERVICE_OFF = 0
PROXYSERVICE_ON = 1
# Methods for authentication of the source in live streaming
LIVE_AUTHMETHOD_NONE = "None" # No auth, also no abs. piece nr. or timestamp.
LIVE_AUTHMETHOD_ECDSA = "ECDSA" # Elliptic Curve DSA signatures
LIVE_AUTHMETHOD_RSA = "RSA" # RSA signatures
# Video-On-Demand / live events
VODEVENT_START = "start"
VODEVENT_PAUSE = "pause"
VODEVENT_RESUME = "resume"
# Friendship messages
F_REQUEST_MSG = "REQ"
F_RESPONSE_MSG = "RESP"
F_FORWARD_MSG = "FWD" # Can forward any type of other friendship message
# States for a friend
FS_NOFRIEND = 0
FS_MUTUAL = 1
FS_I_INVITED = 2
FS_HE_INVITED = 3
FS_I_DENIED = 4
FS_HE_DENIED = 5
P2PURL_SCHEME = "tribe" # No colon
URL_MIME_TYPE = 'text/x-url'
TSTREAM_MIME_TYPE = "application/x-ns-stream"
TRIBLER_TORRENT_EXT = ".tribe" # Unused
# Infohashes are always 20 byte binary strings
INFOHASH_LENGTH = 20
|
egbertbouman/tribler-g
|
Tribler/Core/simpledefs.py
|
Python
|
lgpl-2.1
| 5,719
|
import numpy as np
from numpy.random import randn
from numpy.testing import assert_almost_equal, dec
from dipy.reconst.vec_val_sum import vec_val_vect
def make_vecs_vals(shape):
return randn(*(shape)), randn(*(shape[:-2] + shape[-1:]))
try:
np.einsum
except AttributeError:
with_einsum = dec.skipif(True, "Need einsum for benchmark")
else:
def with_einsum(f): return f
@with_einsum
def test_vec_val_vect():
for shape0 in ((10,), (100,), (10, 12), (12, 10, 5)):
for shape1 in ((3, 3), (4, 3), (3, 4)):
shape = shape0 + shape1
evecs, evals = make_vecs_vals(shape)
res1 = np.einsum('...ij,...j,...kj->...ik', evecs, evals, evecs)
assert_almost_equal(res1, vec_val_vect(evecs, evals))
def dumb_sum(vecs, vals):
N, rows, cols = vecs.shape
res2 = np.zeros((N, rows, rows))
for i in range(N):
Q = vecs[i]
L = vals[i]
res2[i] = np.dot(Q, np.dot(np.diag(L), Q.T))
return res2
def test_vec_val_vect_dumber():
for shape0 in ((10,), (100,)):
for shape1 in ((3, 3), (4, 3), (3, 4)):
shape = shape0 + shape1
evecs, evals = make_vecs_vals(shape)
res1 = dumb_sum(evecs, evals)
assert_almost_equal(res1, vec_val_vect(evecs, evals))
|
nilgoyyou/dipy
|
dipy/reconst/tests/test_vec_val_vect.py
|
Python
|
bsd-3-clause
| 1,302
|
# -*- coding: utf-8 -*-
import pytest
from model_mommy import mommy
from oauth2_provider.models import (
get_access_token_model,
get_application_model
)
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory, force_authenticate
from django_toolkit import shortcuts
Application = get_application_model()
AccessToken = get_access_token_model()
@pytest.mark.django_db
class TestGetCurrentApp(object):
@pytest.fixture
def application(self):
return mommy.make(Application)
@pytest.fixture
def token(self, application):
return mommy.make(AccessToken, application=application)
def test_should_return_the_client_applicaton(self, token):
factory = APIRequestFactory()
request = factory.get('/')
force_authenticate(request, token=token)
rest_request = Request(request)
app = shortcuts.get_oauth2_app(rest_request)
assert isinstance(app, Application)
assert app == token.application
def test_should_return_none_when_not_authenticated(self):
factory = APIRequestFactory()
request = factory.get('/')
rest_request = Request(request)
app = shortcuts.get_oauth2_app(rest_request)
assert app is None
|
luizalabs/django-toolkit
|
tests/test_shortcuts.py
|
Python
|
mit
| 1,277
|
#
"""
"""
import os
import json
try:
import vtk
has_vtk = True
except ImportError:
has_vtk = False
def convert(data, in_format, out_format='json'):
"""
"""
if in_format == 'json':
pnm = json_to_json(data)
elif in_format == 'dat':
pnm = imperial_to_json(data)
elif in_format == 'vtp':
pnm = openpnm_to_json(data)
return pnm
def json_to_json(fname):
"""
"""
with open(fname, 'r') as in_file:
pnm = in_file.read()
return pnm
def imperial_to_json(fname):
"""
http://www3.imperial.ac.uk/pls/portallive/docs/1/11280.PDF
"""
ext = '.dat'
froot = os.path.dirname(fname)
fsuffix = ['_node1', '_node2', '_link1', '_link2']
fprefix = os.path.basename(fname).split('_')[0]
flist = [os.path.join(froot, fprefix + i + ext) for i in fsuffix]
fmiss = [int(not i) for i in map(os.path.isfile, flist)]
fparse = os.path.join(froot, fprefix)
# Throw an exception if any file is missing
if any(fmiss):
raise Exception(
            'Missing file(s): ' + ', '.join([i for i, t in zip(flist, fmiss)
if t]))
else:
pnm = read_imperial(fparse)
return pnm
def openpnm_to_json(fname):
"""
"""
    if not has_vtk:
raise Exception("VTK is not installed")
pnm = read_openpnm(fname)
return pnm
def read_openpnm(fname):
"""
"""
# Setup reader for VTP file
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(fname)
reader.Update()
# Grab the polydata
polydata = vtk.vtkPolyData()
polydata.ShallowCopy(reader.GetOutput())
# The points have the pore location ...
npoints = polydata.GetNumberOfPoints()
# ... and the cells have the throat connections
ncells = polydata.GetNumberOfCells()
# The radius comes from the data array
pointradius = polydata.GetPointData().GetArray('pore.net_diameter')
lineradius = polydata.GetCellData().GetArray('throat.net_diameter')
pnm = dict({'pores': [], 'throats': [], })
for i in xrange(npoints):
point = polydata.GetPoint(i)
pore = {
'index': i,
'location': point,
'radius': pointradius.GetTuple(i)[0]/2,
}
pnm['pores'].append(pore)
for i in xrange(ncells):
cell = polydata.GetCell(i)
throat = {
'index': i,
'pores': [cell.GetPointId(0), cell.GetPointId(1)],
'radius': lineradius.GetTuple(i)[0]/2,
}
pnm['throats'].append(throat)
return pnm
def read_imperial(fprefix, ext='dat'):
"""Read PNM models from *_node1, *_node2, *_link1 and *_link2 files
"""
d = {'fprefix': fprefix, 'ext': ext}
code = '{fprefix}{}.{ext}'
npores, size, nodes, coord, pconn, tconn, iflag, oflag = read_node1(
code.format('_node1', **d))
povol, clvol, inrad, shape = read_node2(code.format('_node2', **d))
nthroats, pind0, pind1, tshap, trad, ttlen = read_link1(
code.format('_link1', **d))
plen0, plen1, tlen, ncvol, clvol = read_link2(code.format('_link2', **d))
pnm = dict({'pores': [], 'throats': [], })
for i in xrange(npores):
pore = {
'index': i,
'location': tuple(nodes[i]),
'radius': inrad[i],
}
pnm['pores'].append(pore)
for i in xrange(nthroats):
throat = {
'index': i,
'pores': [pind0[i], pind1[i]],
'radius': trad[i],
}
pnm['throats'].append(throat)
return pnm
def read_node1(fname):
"""Read *_node1 file
"""
with open(fname, 'r') as f:
# Parsing the header containing the total number of pores and the size of the model
line = f.readline().strip().split()
# 1st element: pore index number and model size
npores = int(line[0])
size = tuple(float(i) for i in line[1:])
# Initializing variables
nodes = []
coord = []
pconn = []
tconn = []
iflag = []
oflag = []
# Loop over the lines
for iline, line in enumerate(f.readlines()):
line = line.strip().split()
# 2nd - 4th elements: pore location (x, y, z)
nodes.append([float(i) for i in line[1:4]])
# 5th: pore cordinate number
ncoord = int(line[4])
coord.append(ncoord)
# 6th - NCOORD: pore connected index
slc = slice(5, 4 + ncoord, 1) if ncoord != 1 else slice(5, 6, 1)
pconn.append([int(i) for i in line[slc]])
# NCOORD + 6: pore inlet flag
iflag.append(bool(int(line[5 + ncoord])))
# NCOORD + 7: pore outlet flag
oflag.append(bool(int(line[6 + ncoord])))
# 2*NCOORD + 6 - 2*NCOORD + 7: throat connected index
slc = slice(7 + ncoord, 10 + ncoord,
1) if ncoord != 1 else slice(7 + ncoord, 8 + ncoord, 1)
tconn.append([int(i) for i in line[slc]])
return npores, size, nodes, coord, pconn, tconn, iflag, oflag
def read_node2(fname):
"""Read *_node2 file
"""
with open(fname, 'r') as f:
# Initializing variables
povol = []
clvol = []
inrad = []
shape = []
# Loop over the lines
for iline, line in enumerate(f.readlines()):
# 1st element: pore index number
line = line.strip().split()
# 2nd element: non-clay volume
povol.append(float(line[1]))
# 3rd element: inscribed radius
inrad.append(float(line[2]))
# 4th element: shape factor
shape.append(float(line[3]))
# 5th element: clay volume
clvol.append(float(line[4]))
return povol, clvol, inrad, shape
def read_link1(fname):
"""Read *_link1 file
"""
with open(fname, 'r') as f:
# Parsing the header containing the total number of pores and the size of the model
line = f.readline().strip().split()
# 1st element: throat index number
nthroats = int(line[0])
# Initializing variables
pind0 = []
pind1 = []
tshap = []
trad = []
ttlen = []
# Loop over the lines
for iline, line in enumerate(f.readlines()):
line = line.strip().split()
# 2nd element: initial pore index
pind0.append(int(line[1]))
# 3rd element: terminal pore index
pind1.append(int(line[2]))
# 4th element: throat radius
trad.append(float(line[3]))
# 5th element: throat shape factor
tshap.append(float(line[4]))
# 6th element: throat total length
ttlen.append(float(line[5]))
return nthroats, pind0, pind1, tshap, trad, ttlen
def read_link2(fname):
"""Read *_link2 file
"""
with open(fname, 'r') as f:
# Initializing variables
plen0 = []
plen1 = []
tlen = []
ncvol = []
clvol = []
# Loop over the lines
for iline, line in enumerate(f.readlines()):
line = line.strip().split()
            # 4th element: initial pore length
plen0.append(float(line[3]))
            # 5th element: terminal pore length
plen1.append(float(line[4]))
            # 6th element: throat length
tlen.append(float(line[5]))
            # 7th element: non-clay throat volume
ncvol.append(float(line[6]))
            # 8th element: clay throat volume
clvol.append(float(line[7]))
return plen0, plen1, tlen, ncvol, clvol
def write_json(fname, d, indent=None):
with open(fname, 'w') as f:
        # honor the optional indent argument instead of silently ignoring it
        return f.write(json.dumps(d, sort_keys=True, indent=indent, separators=(",", ":")))
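# Hedged usage sketch (not part of the original module): assuming an
# Imperial-format model whose files are named "Berea_node1.dat",
# "Berea_node2.dat", "Berea_link1.dat" and "Berea_link2.dat" (the prefix is
# invented for the example), a conversion to JSON could look like:
#
#     pnm = read_imperial('Berea')
#     write_json('Berea.json', pnm)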
|
oliveirarodolfo/ipnm
|
ipnm/format_converter.py
|
Python
|
mit
| 7,923
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'g:i A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/conf/locale/hi/formats.py
|
Python
|
mit
| 684
|
from flask import Flask, request, jsonify, send_from_directory
import os
import uuid
import shutil
import psycopg2
import urlparse
import json
from psycopg2.extras import Json
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
db = conn.cursor()
psycopg2.extensions.register_adapter(dict, Json)
app = Flask(__name__)
UPLOAD_FOLDER = "uploads/"
@app.route("/")
def index():
return send_from_directory('static/', 'index.html')
@app.route("/<path:path>")
def serve_static_files(path):
return send_from_directory('static/', path)
@app.route("/add-debug-file")
def add_debug_file():
if not os.path.isdir(UPLOAD_FOLDER):
os.mkdir(UPLOAD_FOLDER)
filename = uuid.uuid4().__str__()
open(os.path.join(UPLOAD_FOLDER, filename), 'a').close()
file_desc = {"filename": filename, "title": filename}
db.execute("INSERT INTO files VALUES(%s)", [file_desc])
conn.commit()
return jsonify(file_desc)
@app.route("/sounds")
def get_sounds_list():
db.execute("""SELECT * FROM files""")
rows = db.fetchall()
_sounds = []
for row in rows:
_sounds.append(row[0])
return jsonify({'sounds': _sounds})
@app.route("/sounds/<path:path>")
def serve_static(path):
return send_from_directory(UPLOAD_FOLDER, path)
@app.route("/upload", methods=["POST"])
def upload_file():
file = request.files["file"]
info = json.JSONDecoder().decode(request.values["info"])
if file:
if not os.path.isdir(UPLOAD_FOLDER):
os.mkdir(UPLOAD_FOLDER)
filename = uuid.uuid4().__str__()
file.save(os.path.join(UPLOAD_FOLDER, filename))
if not info['title']:
info['title'] = 'Amazing recording'
file_desc = {"filename": filename, "title": info['title']}
db.execute("INSERT INTO files VALUES(%s)", [file_desc])
conn.commit()
return filename + "\n"
@app.route("/del")
def delete():
try:
shutil.rmtree(UPLOAD_FOLDER)
db.execute("""DELETE FROM files""")
conn.commit()
return jsonify({'result': 'success'})
except Exception as e:
return jsonify({'result': str(e)})
if __name__ == "__main__":
app.run(host = "0.0.0.0", debug=True)
|
spb201/turbulent-octo-rutabaga-api
|
app.py
|
Python
|
mit
| 2,394
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-05 13:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0022_auto_20160227_1637'),
]
operations = [
migrations.AddField(
model_name='talkproposal',
name='result',
field=models.IntegerField(choices=[(0, 'Undecided'), (1, 'Accepted'), (2, 'Rejected')], db_index=True, default=0, verbose_name='review result'),
),
migrations.AddField(
model_name='tutorialproposal',
name='result',
field=models.IntegerField(choices=[(0, 'Undecided'), (1, 'Accepted'), (2, 'Rejected')], db_index=True, default=0, verbose_name='review result'),
),
]
|
pycontw/pycontw2016
|
src/proposals/migrations/0023_auto_20160305_1359.py
|
Python
|
mit
| 829
|
#$Id$
from books.model.PageContext import PageContext
from books.model.Instrumentation import Instrumentation
class TransactionList:
"""This class is used to create object for Transaction list."""
def __init__(self):
"""Initialize parameters for Transaction list object."""
self.transactions = []
self.page_context = PageContext()
        self.instrumentation = Instrumentation()
def set_transactions(self, transaction):
"""Set transactions.
Args:
transaction(instance): Transaction object.
"""
self.transactions.append(transaction)
def get_transactions(self):
"""Get transactions.
Returns:
list of instance: List of transactions object.
"""
return self.transactions
def set_page_context(self, page_context):
"""Set page context.
Args:
page_context(instance): Page context
"""
self.page_context = page_context
def get_page_context(self):
"""Get page context.
Returns:
instance: Page context object.
"""
return self.page_context
def set_instrumentation(self, instrumentation):
"""Set instrumentation.
Args:
instrumentation(instance): Instrumentation object.
"""
self.instrumentation = instrumentation
def get_instrumentation(self):
"""Get instrumentation.
Returns:
instance: Instrumentation object.
"""
return self.instrumentation
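# Hedged usage sketch (not part of the original wrapper); "txn" stands for a
# previously created Transaction instance and is illustrative only:
#
#     txn_list = TransactionList()
#     txn_list.set_transactions(txn)
#     transactions = txn_list.get_transactions()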
|
zoho/books-python-wrappers
|
books/model/TransactionList.py
|
Python
|
mit
| 1,583
|
"""Create a conduit from the available information.
Can try to examine './.arcconfig' and '~/.arcrc' if not enough
information is provided.
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_makeconduit
#
# Public Classes:
# InsufficientInfoException
#
# Public Functions:
# add_argparse_arguments
# make_conduit
# obscured_cert
# get_uri_user_cert_explanation
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import difflib
import phlsys_arcconfig
import phlsys_arcrc
import phlsys_conduit
class InsufficientInfoException(Exception):
def __init__(self, message):
super(InsufficientInfoException, self).__init__(message)
def _make_exception(*args):
return InsufficientInfoException("\n" + "\n\n".join(args))
def add_argparse_arguments(parser):
"""Add a 'connection arguments' group to the supplied argparse.parser."""
connection = parser.add_argument_group(
'connection arguments',
'use these optional parameters to override settings present in your\n'
'"~/.arcrc" or ".arcconfig" files')
connection.add_argument(
"--uri",
type=str,
metavar="ADDRESS",
help="address of the phabricator instance to connect to.")
connection.add_argument(
"--user",
type=str,
metavar="NAME",
help="name of the user to connect as.")
connection.add_argument(
"--cert",
type=str,
metavar="HEX",
help="long certificate string of the user to connect as, you can find "
"this string here: "
"http://your.phabricator/settings/panel/conduit/. generally you "
"wouldn't expect to enter this on the command-line and would "
"make an ~/.arcrc file by using '$ arc install-certificate'.")
connection.add_argument(
'--act-as-user',
type=str,
metavar="NAME",
help="name of the user to impersonate (admin only).\n")
def make_conduit(uri=None, user=None, cert=None, act_as_user=None):
uri, user, cert, _ = get_uri_user_cert_explanation(uri, user, cert)
return phlsys_conduit.Conduit(uri, user, cert, act_as_user)
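# Hedged usage sketch (not part of the original module); "parser" and "args"
# are illustrative names and argparse is assumed to be imported by the caller:
#
#     parser = argparse.ArgumentParser()
#     add_argparse_arguments(parser)
#     args = parser.parse_args()
#     conduit = make_conduit(args.uri, args.user, args.cert, args.act_as_user)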
def obscured_cert(cert):
"""Return an obscured version of the supplied 'cert' suitable for display.
:cert: a string of a conduit certificate
:returns: a string of an obscured conduit certificate
"""
return cert[:4] + '...' + cert[-4:]
def get_uri_user_cert_explanation(uri, user, cert):
if uri and user and cert:
explanations = ["all parameters were supplied"]
uri = _fix_uri(explanations, uri)
return uri, user, cert, '\n\n'.join(explanations)
arcrc, arcrc_path = _load_arcrc()
arcconfig_path, arcconfig = _load_arcconfig()
install_arc_url = str(
"http://www.phabricator.com/docs/phabricator/article/"
"Arcanist_User_Guide.html#installing-arcanist")
no_uri = "no uri to a Phabricator instance was specified."
no_user = "no username for the Phabricator instance was specified."
no_cert = "no certificate for the Phabricator instance was specified."
no_arcconfig = (
"couldn't find an .arcconfig, this file should contain "
"the uri for the phabricator instance you wish to connect "
"to.\n"
"we search for it in the current working directory and in "
"the parent directories\n"
"here is an example .arcconfig:\n"
"{\n"
" \"conduit_uri\" : \"https://your.phabricator/\"\n"
"}")
no_arcrc = (
"couldn't find ~/.arcrc, this file should contain "
"usernames and certificates which will allow us to authenticate with "
"Phabricator.\n"
"To generate a valid ~/.arcrc for a particular instance, you may "
"run:\n"
"\n"
"$ arc install-certificate [URI]\n"
"N.B. to install arc:\n" + install_arc_url)
bad_arcrc = (
"can't load .arcrc, it may be invalid json or not permissioned\n"
"path used: " + str(arcrc_path))
bad_arcconfig = (
"can't load .arcconfig, it may be invalid json or not permissioned\n"
"path used: " + str(arcconfig_path))
arcrc_no_default = (
"no default uri was discovered in .arcrc, you may add one like so:\n"
"$ arc set-config default https://your.phabricator/\n"
"N.B. to install arc:\n" + install_arc_url)
arcconfig_no_uri = (
".arcconfig doesn't seem to contain a conduit_uri entry\n"
"path used: " + str(arcconfig_path))
explanations = []
# try to discover conduit uri first
if uri is None:
if not arcconfig_path:
if not arcrc_path:
raise _make_exception(no_uri, no_arcconfig, no_arcrc)
if arcrc is None:
raise _make_exception(no_uri, no_arcconfig, bad_arcrc)
if "config" in arcrc:
uri = arcrc["config"].get("default", None)
if uri is None:
raise _make_exception(no_uri, no_arcconfig, arcrc_no_default)
explanations.append(
"got uri from 'default' entry in arcrc\n"
" path: {0}\n"
" uri: {1}".format(arcrc_path, uri))
else: # if arcconfig_path
if arcconfig is None:
raise _make_exception(no_uri, bad_arcconfig)
uri = arcconfig.get("conduit_uri", None)
if uri is None:
raise _make_exception(no_uri, arcconfig_no_uri)
explanations.append(
"got uri from .arcconfig\n"
" path: {0}\n"
" uri: {1}".format(arcconfig_path, uri))
uri = _fix_uri(explanations, uri)
arcrc_no_entry = (
"no entry for the uri was found in .arcrc, you may add one like so:\n"
"$ arc install-certificate " + uri + "\n"
"N.B. to install arc:\n" + install_arc_url)
# try to discover user
if user is None:
if not arcrc_path:
raise _make_exception(no_user, no_arcrc)
if arcrc is None:
raise _make_exception(no_user, bad_arcrc)
if "hosts" in arcrc:
host = phlsys_arcrc.get_host(arcrc, uri)
if host is None:
raise _make_exception(no_user, arcrc_no_entry)
user = host.get("user", None)
explanations.append(
"got user from uri's entry in .arcrc\n"
" path: {0}\n"
" user: {1}".format(arcrc_path, user))
if cert is None:
cert = host.get("cert", None)
explanations.append(
"got cert from uri's entry in .arcrc\n"
" path: {0}\n"
" cert: {1}".format(arcrc_path, obscured_cert(cert)))
if user is None:
raise _make_exception(no_user, arcrc_no_entry)
if user is None:
raise _make_exception(no_user, arcrc_no_entry)
# try to discover cert
if cert is None:
if not arcrc_path:
raise _make_exception(no_cert, no_arcrc)
if arcrc is None:
raise _make_exception(no_cert, bad_arcrc)
if "hosts" in arcrc:
host = phlsys_arcrc.get_host(arcrc, uri)
if host is None:
raise _make_exception(no_cert, arcrc_no_entry)
cert = host.get("cert", None)
explanations.append(
"got cert from uri's entry in .arcrc\n"
" path: {0}\n"
" cert: {1}".format(arcrc_path, obscured_cert(cert)))
if cert is None:
raise _make_exception(no_cert, arcrc_no_entry)
# make a generic statement if we've missed an error case
if not (uri and user and cert) or arcrc_path is None:
raise Exception("unexpected error determinining uri, user or cert")
return uri, user, cert, '\n\n'.join(explanations)
def _load_arcconfig():
# try to load arcconfig, if we can find it
arcconfig_path = phlsys_arcconfig.find_arcconfig()
arcconfig = None
try:
if arcconfig_path is not None:
arcconfig = phlsys_arcconfig.load(arcconfig_path)
except ValueError:
pass
except EnvironmentError:
pass
return arcconfig_path, arcconfig
def _load_arcrc():
# try to load arcrc, if we can find it
arcrc_path = phlsys_arcrc.find_arcrc()
arcrc = None
try:
if arcrc_path is not None:
arcrc = phlsys_arcrc.load(arcrc_path)
except ValueError:
pass
except EnvironmentError:
pass
return arcrc, arcrc_path
def _fix_uri(explanations, uri):
old_uri = uri
uri = phlsys_conduit.make_conduit_uri(uri)
if uri != old_uri:
diff = list(difflib.Differ().compare([old_uri], [uri]))
diff = [' ' + s.strip() for s in diff]
diff = '\n'.join(diff)
explanations.append("assumed uri to conduit:\n{0}".format(diff))
return uri
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
valhallasw/phabricator-tools
|
py/phl/phlsys_makeconduit.py
|
Python
|
apache-2.0
| 10,069
|
from django.db import models
from django.db.models import Count
from belt.managers import SearchQuerySetMixin
class PostQuerySet(SearchQuerySetMixin, models.QuerySet):
pass
class CategoryQuerySet(SearchQuerySetMixin, models.QuerySet):
pass
class BlogQuerySet(SearchQuerySetMixin, models.QuerySet):
def annotate_total_posts(self):
return self.annotate(total_posts=Count("posts"))
|
marcosgabarda/django-belt
|
tests/app/managers.py
|
Python
|
mit
| 406
|
bot_user = 'U041TJU13'
irc_channel = 'C040NNZHT'
outputs = []
def process_message(data):
message_text = data.get('text')
if message_text:
if (bot_user in message_text) and ('source' in message_text):
outputs.append([irc_channel, '`https://github.com/paulnurkkala/comm-slackbot`'])
|
paulnurkkala/comm-slackbot
|
plugins/get_source/get_source.py
|
Python
|
gpl-2.0
| 297
|
# NOTE: For xVideos use the direct link, which should look something like this: "http://www.xvideos.com/video123456/blah_blah_blah"
# Plugin for gallery_get.
import re
# Each definition can be one of the following:
# - a string
# - a regex string
# - a function that takes source as a parameter and returns an array or a string. (You may assume that re and urllib are already imported.)
# If you comment out a parameter, it will use the default defined in __init__.py
# identifier (default = name of this plugin after "plugin_") : If there's a match, we'll attempt to download images using this plugin.
identifier = "xvideos.com/video"
# title: parses the gallery page for a title. This will be the folder name of the output gallery.
title = r"setVideoTitle\('(.+?)'"
# redirect: if the links in the gallery page go to an html instead of an image, use this to parse the gallery page.
# direct_links: if redirect is non-empty, this parses each redirect page for a single image. Otherwise, this parses the gallery page for all images.
direct_links = r"setVideoUrlHigh\('(\S+?)'"
# same_filename (default=False): if True, uses filename specified on remote link. Otherwise, creates own filename with incremental index.
same_filename = False
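# Hedged illustration (not part of the original plugin): as noted above, a
# definition may also be a function that takes the page source. A hypothetical
# function-style title definition could look like:
#
#     def title(source):
#         match = re.search(r"setVideoTitle\('(.+?)'", source)
#         return match.group(1) if match else "untitled"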
|
regosen/gallery_get
|
gallery_plugins/plugin_xVideos.py
|
Python
|
mit
| 1,251
|
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
name = "ocp"
def ready(self):
import_module("ocp.receivers")
|
FreedomCoop/valuenetwork
|
ocp/apps.py
|
Python
|
agpl-3.0
| 201
|
from typing import Optional, Sequence
from waitlist.storage.database import HistoryEntry, Shipfit
def create_history_object(target_id: int, event_type: str, source_id: Optional[int] = None,
fitlist: Optional[Sequence[Shipfit]] = None) -> HistoryEntry:
h_entry = HistoryEntry()
h_entry.sourceID = source_id
h_entry.targetID = target_id
h_entry.action = event_type
if fitlist is not None:
for fit in fitlist:
h_entry.fittings.append(fit)
return h_entry
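# Hedged usage sketch (not part of the original module); the ids and event type
# below are invented for illustration:
#
#     entry = create_history_object(target_id=42, event_type='some_event', source_id=7)
#     # the entry would then be added to a database session elsewhere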
|
SpeedProg/eve-inc-waitlist
|
waitlist/utility/history_utils.py
|
Python
|
mit
| 524
|
#!/usr/bin/env python
# This is only the automatic generated test file for ../restack.py
# This must be filled with real tests and this commentary
# must be cleared.
# If you want to help, read the python unittest documentation:
# http://docs.python.org/library/unittest.html
import sys
sys.path.append('..') # this line allows to import the extension code
import unittest
from restack import *
class RestackBasicTest(unittest.TestCase):
#def setUp(self):
def test_run_without_parameters(self):
args = [ 'minimal-blank.svg' ]
e = Restack()
e.affect( args, False )
#self.assertEqual( e.something, 'some value', 'A commentary about that.' )
if __name__ == '__main__':
unittest.main()
|
piksels-and-lines-orchestra/inkscape
|
share/extensions/test/restack.test.py
|
Python
|
gpl-2.0
| 711
|
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
"""
This TinyMCE widget was copied and extended from this code by John D'Agostino:
http://code.djangoproject.com/wiki/CustomWidgetsTinyMCE
"""
import json
from django import forms
from django.conf import settings
from django.contrib.admin import widgets as admin_widgets
from django.core.urlresolvers import reverse
from django.forms.widgets import flatatt
try:
from django.utils.encoding import smart_text as smart_unicode
except ImportError:
try:
from django.utils.encoding import smart_unicode
except ImportError:
from django.forms.util import smart_unicode
from django.utils.html import escape
try:
from django.utils.datastructures import SortedDict
except ImportError:
from collections import OrderedDict as SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext as _
import tinymce.settings
from tinymce.profiles import DEFAULT as DEFAULT_PROFILE
class TinyMCE(forms.Textarea):
"""
TinyMCE widget. Set settings.TINYMCE_JS_URL to set the location of the
javascript file. Default is "MEDIA_URL + 'js/tiny_mce/tiny_mce.js'".
You can customize the configuration with the mce_attrs argument to the
constructor.
In addition to the standard configuration you can set the
'content_language' parameter. It takes the value of the 'language'
parameter by default.
In addition to the default settings from settings.TINYMCE_DEFAULT_CONFIG,
this widget sets the 'language', 'directionality' and
'spellchecker_languages' parameters by default. The first is derived from
the current Django language, the others from the 'content_language'
parameter.
"""
def __init__(self, content_language=None, attrs=None, mce_attrs=None, profile=None):
super(TinyMCE, self).__init__(attrs)
if mce_attrs is None:
mce_attrs = {}
self.mce_attrs = mce_attrs
if content_language is None:
content_language = mce_attrs.get('language', None)
self.content_language = content_language
self.profile = profile or DEFAULT_PROFILE
def render(self, name, value, attrs=None):
if value is None: value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, "TinyMCE widget attributes must contain 'id'"
mce_config = self.profile.copy()
#mce_config.update(get_language_config(self.content_language))
#if tinymce.settings.USE_FILEBROWSER:
#mce_config['file_browser_callback'] = "djangoFileBrowser"
mce_config.update(self.mce_attrs)
mce_config['selector'] = '#%s' % final_attrs['id']
# Fix for js functions
#js_functions = {}
#for k in ('paste_preprocess','paste_postprocess'):
#if k in mce_config:
#js_functions[k] = mce_config[k]
#del mce_config[k]
mce_json = json.dumps(mce_config)
#for k in js_functions:
#index = mce_json.rfind('}')
#mce_json = mce_json[:index]+', '+k+':'+js_functions[k].strip()+mce_json[index:]
if mce_config.get('inline', False):
html = [u'<div%s>%s</div>' % (flatatt(final_attrs), escape(value))]
else:
html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))]
html.append(u'<script type="text/javascript">tinyMCE.init(%s)</script>' % mce_json)
return mark_safe(u'\n'.join(html))
def _media(self):
if tinymce.settings.USE_COMPRESSOR:
js = [reverse('tinymce-compressor')]
else:
js = [tinymce.settings.JS_URL]
if tinymce.settings.USE_FILEBROWSER:
js.append(reverse('tinymce-filebrowser'))
return forms.Media(js=js)
media = property(_media)
class AdminTinyMCE(admin_widgets.AdminTextareaWidget, TinyMCE):
pass
def get_language_config(content_language=None):
language = get_language()[:2]
if content_language:
content_language = content_language[:2]
else:
content_language = language
config = {}
config['language'] = language
lang_names = SortedDict()
for lang, name in settings.LANGUAGES:
if lang[:2] not in lang_names: lang_names[lang[:2]] = []
lang_names[lang[:2]].append(_(name))
sp_langs = []
for lang, names in lang_names.items():
if lang == content_language:
default = '+'
else:
default = ''
sp_langs.append(u'%s%s=%s' % (default, ' / '.join(names), lang))
config['spellchecker_languages'] = ','.join(sp_langs)
if content_language in settings.LANGUAGES_BIDI:
config['directionality'] = 'rtl'
else:
config['directionality'] = 'ltr'
if tinymce.settings.USE_SPELLCHECKER:
config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')
return config
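# Hedged usage sketch (not part of the original module); "ArticleForm" and the
# "content" field are illustrative only:
#
#     class ArticleForm(forms.Form):
#         content = forms.CharField(widget=TinyMCE(mce_attrs={'height': 300}))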
|
dani0805/django-tinymce4
|
tinymce/widgets.py
|
Python
|
mit
| 5,087
|
VERSION = (0, 7, 1, 'final', 0)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
sub = ''
if VERSION[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[VERSION[3]] + str(VERSION[4])
return version + sub
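# Worked examples (added for illustration): with VERSION = (0, 7, 1, 'final', 0)
# get_version() returns '0.7.1'; with VERSION = (0, 8, 0, 'beta', 2) it would
# return '0.8b2'.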
|
nagyistoce/geokey
|
geokey/version.py
|
Python
|
apache-2.0
| 350
|
from __future__ import absolute_import
from .base import Decider, DeciderPoller, DeciderWorker # NOQA # isort:skip
from . import command # NOQA # isort:skip
|
botify-labs/simpleflow
|
simpleflow/swf/process/decider/__init__.py
|
Python
|
mit
| 162
|
## -*- coding: utf-8 -*-
import logging
from os import environ
from subprocess import call
from createhdr.createhdr import ReadTif
from os.path import join, dirname
from celery import shared_task, group
from django.conf import settings
from django.contrib.gis.geos import MultiPolygon
from imagery.models import Image
from .models import CatalogoLandsat
logger = logging.getLogger(__name__)
BASE_PATH = dirname(__file__)
PATH_TO_SCRIPT = join(BASE_PATH, 'bin/make_tms.sh')
OUTPUT_PNG_PATH = settings.PNG_IMAGES_PATH
OUTPUT_TMS_PATH = settings.TMS_IMAGES_PATH
LINK_BASE = settings.URL_TMS_BASE
environ['PATH'] += ':'+join(BASE_PATH, 'bin/scripts-for-gis')
environ['PATH'] += ':'+join(BASE_PATH, 'bin/tilers-tools.v32/tilers_tools')
@shared_task
def make_tms(image):
"""Generate the TMS of an Image."""
if (image.type == 'r6g5b4' and image.scene.sat == 'L8') or \
(image.type == 'r5g4b3' and image.scene.sat in ['L5', 'L7']):
if CatalogoLandsat.objects.filter(image=image.name).count() == 0:
logger.debug('Starting process "make_tms" to image %s' % image.name)
call([PATH_TO_SCRIPT, image.file_path(), OUTPUT_PNG_PATH, OUTPUT_TMS_PATH, LINK_BASE])
logger.debug('Process "make_tms" to image %s finished ' % image.name)
CatalogoLandsat.objects.create(
image=image.name,
path=settings.LANDSAT_PATH_FORMAT % (image.scene.sat[-1], image.scene.name),
geom=MultiPolygon(image.scene.geom),
data=image.scene.date,
nuvens=image.scene.cloud_rate,
quicklook=image.scene.quicklook(),
orbita=image.scene.path,
ponto=image.scene.row,
url_tms=join(settings.URL_TMS_BASE, '%s_%s_tms.xml' % (image.scene.name, image.type)))
# create HDR file for the RGB image
logger.debug('Starting creation of HDR file to image %s' % image.name)
tif = ReadTif(image.file_path())
hdr_name = tif.write_hdr()
logger.debug('Creation of HDR file to image %s finished' % image.name)
Image.objects.get_or_create(
name=hdr_name.split('/')[-1],
type='hdr',
scene=image.scene)
else:
logger.info('%s already has TMS' % image.name)
else:
raise Exception('Image is not a Landsat 8 of r6g5b4 type or a Landsat 5/7 of r5g4b3 type.')
@shared_task
def make_tms_all():
    '''Search for all images that have never been processed and call the make_tms
    function to process them.'''
type_list = ['r6g5b4', 'r5g4b3']
logger.debug('Quering all items from catalogo')
catalog_images = CatalogoLandsat.objects.values_list('image', flat=True)
logger.debug('Quering images from imagery that weren\'t begot TMS')
images = Image.objects.filter(type__in=type_list).exclude(name__in=catalog_images)
logger.info('Found %s images to make TMS' % images.count())
group([make_tms.s(image) for image in images])()
logger.info('Make all TMS completed')
def create_hdr(image):
if (image.type == 'r6g5b4' and image.scene.sat == 'L8') or \
(image.type == 'r5g4b3' and image.scene.sat in ['L5', 'L7']):
tif = ReadTif(image.file_path())
tif.write_hdr()
|
ibamacsr/indicar-process
|
indicarprocess/catalogo/tasks.py
|
Python
|
agpl-3.0
| 3,329
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The niftyreg module provides classes for interfacing with `niftyreg
<http://sourceforge.net/projects/niftyreg/>`_ command line tools.
These are the base tools for working with niftyreg.
Registration tools are found in niftyreg/reg.py
Every other tool is found in niftyreg/regutils.py
Examples
--------
See the docstrings of the individual classes for examples.
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import property, super
from distutils.version import StrictVersion
import os
import shutil
import subprocess
from warnings import warn
from ..base import CommandLine, CommandLineInputSpec, traits, Undefined
from ...utils.filemanip import split_filename
def get_custom_path(command, env_dir='NIFTYREGDIR'):
return os.path.join(os.getenv(env_dir, ''), command)
def no_nifty_package(cmd='reg_f3d'):
try:
return shutil.which(cmd) is None
except AttributeError: # Python < 3.3
return not any(
[os.path.isfile(os.path.join(path, cmd)) and
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)])
class NiftyRegCommandInputSpec(CommandLineInputSpec):
"""Input Spec for niftyreg interfaces."""
# Set the number of omp thread to use
omp_core_val = traits.Int(int(os.environ.get('OMP_NUM_THREADS', '1')),
desc='Number of openmp thread to use',
argstr='-omp %i', usedefault=True)
class NiftyRegCommand(CommandLine):
"""
Base support interface for NiftyReg commands.
"""
_suffix = '_nr'
_min_version = '1.5.30'
input_spec = NiftyRegCommandInputSpec
def __init__(self, required_version=None, **inputs):
self.num_threads = 1
super(NiftyRegCommand, self).__init__(**inputs)
self.required_version = required_version
_version = self.get_version()
if _version:
_version = _version.decode("utf-8")
if self._min_version is not None and \
StrictVersion(_version) < StrictVersion(self._min_version):
msg = 'A later version of Niftyreg is required (%s < %s)'
warn(msg % (_version, self._min_version))
if required_version is not None:
if StrictVersion(_version) != StrictVersion(required_version):
msg = 'The version of NiftyReg differs from the required'
msg += '(%s != %s)'
warn(msg % (_version, self.required_version))
self.inputs.on_trait_change(self._omp_update, 'omp_core_val')
self.inputs.on_trait_change(self._environ_update, 'environ')
self._omp_update()
def _omp_update(self):
if self.inputs.omp_core_val:
self.inputs.environ['OMP_NUM_THREADS'] = \
str(self.inputs.omp_core_val)
self.num_threads = self.inputs.omp_core_val
else:
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
self.num_threads = 1
def _environ_update(self):
if self.inputs.environ:
if 'OMP_NUM_THREADS' in self.inputs.environ:
self.inputs.omp_core_val = \
int(self.inputs.environ['OMP_NUM_THREADS'])
else:
self.inputs.omp_core_val = Undefined
else:
self.inputs.omp_core_val = Undefined
def check_version(self):
_version = self.get_version()
if not _version:
raise Exception('Niftyreg not found')
# Decoding to string:
_version = _version.decode("utf-8")
if StrictVersion(_version) < StrictVersion(self._min_version):
err = 'A later version of Niftyreg is required (%s < %s)'
raise ValueError(err % (_version, self._min_version))
if self.required_version:
if StrictVersion(_version) != StrictVersion(self.required_version):
err = 'The version of NiftyReg differs from the required'
err += '(%s != %s)'
raise ValueError(err % (_version, self.required_version))
def get_version(self):
if no_nifty_package(cmd=self.cmd):
return None
exec_cmd = ''.join((self.cmd, ' -v'))
return subprocess.check_output(exec_cmd, shell=True).strip()
@property
def version(self):
return self.get_version()
def exists(self):
return self.get_version() is not None
def _format_arg(self, name, spec, value):
if name == 'omp_core_val':
            self.num_threads = value
return super(NiftyRegCommand, self)._format_arg(name, spec, value)
def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None):
if basename == '':
msg = 'Unable to generate filename for command %s. ' % self.cmd
msg += 'basename is not set!'
raise ValueError(msg)
_, final_bn, final_ext = split_filename(basename)
if out_dir is None:
out_dir = os.getcwd()
if ext is not None:
final_ext = ext
if suffix is not None:
final_bn = ''.join((final_bn, suffix))
return os.path.abspath(os.path.join(out_dir, final_bn + final_ext))
|
mick-d/nipype
|
nipype/interfaces/niftyreg/base.py
|
Python
|
bsd-3-clause
| 5,517
|
############################################################################
# GENERAL MAPPINGS
############################################################################
from lily.contacts.models import Contact
lilyuser_to_owner_mapping = {
3: '', # 'sanne.bakker@voys.nl'
4: 'joris.beltman@voys.nl', # 'joris.beltman@voys.nl'
6: '', # 'werner.bergsma@voys.nl'
10: '', # 'giel.bouwman@voys.nl'
13: 'steven.buurma@voys.nl', # 'steven.buurma@voys.nl'
14: 'miranda.smits@voys.nl', # 'miranda.smits@voys.nl'
23: 'johan.hilverda@voys.nl', # 'johan.hilverda@voys.nl'
24: 'tom.hofstede@voys.nl', # 'tom.hofstede@voys.nl'
33: '', # 'anniek@voys.nl'
35: '', # 'ednan.pasagic@voys.nl'
47: '', # 'nadine.smalbil@voys.nl'
48: '', # 'noe@voys.nl'
59: '', # 'jorg.vletter@voys.nl'
62: 'mark.vletter@voys.nl', # 'mark.vletter@voys.nl'
68: '', # 'dick.wierenga@voys.nl'
69: '', # 'eelco.zwart@voys.nl'
72: '', # 'allard.stijnman@voys.nl'
80: '', # 'ferdy.galema@voys.nl'
81: '', # 'tim@voys.nl'
82: '', # 'dennis.huisman@voys.nl'
84: '', # 'steffie.hendrickx@voys.nl'
85: 'bob.zuidema@voys.nl', # 'bob.zuidema@voys.nl'
86: 'annemieke.doornbos@voys.nl', # 'annemieke.doornbos@voys.nl'
87: 'harriet.zuidland@voys.nl', # 'harriet.zuidland@voys.nl'
88: '', # 'nienke.norden@voys.nl'
89: '', # 'floor.koops-munneke@voys.nl'
90: '', # 'gijs.schuringa@voys.nl'
93: '', # 'bouwina@voys.nl'
94: '', # 'karlien@voys.nl'
97: '', # 'marco.vellinga@voys.nl'
99: '', # 'corinne.kornaat-kersten@voys.nl'
101: '', # 'peter.eigenraam@voys.nl'
103: '', # 'jellemaarten.devries@voys.nl'
105: '', # 'jeroen.renier@voys.nl'
106: '', # 'birgit.timmerman@wearespindle.com'
108: '', # 'redmer.loen@voys.nl'
111: '', # 'chantal@voys.nl'
112: '', # 'lisanne.boersma@voys.nl'
116: 'ben.hoetmer@voys.nl', # 'ben.hoetmer@voys.nl'
119: '', # 'william.ally@voys.nl'
121: '', # 'ednan@wearespindle.com'
122: 'ritske.vanravels@voys.nl', # 'ritske@voys.nl'
123: 'ferdian.frericks@voys.nl', # 'ferdian.frericks@voys.nl'
124: 'richard.grootkarzijn@voys.nl', # 'richard.grootkarzijn@voys.nl'
125: '', # 'gerjan@voys.nl'
126: 'erik.veenstra@voys.nl', # 'erik.veenstra@voys.nl'
129: '', # 'stefan@wearespindle.com'
132: '', # 'eva.moeyersoons@voys.be'
133: 'gerard.verweij@voys.nl', # 'gerard.verweij@voys.nl'
135: '', # 'anja.vanderwoude@voys.nl'
137: '', # 'tom.offringa@wearespindle.com'
138: 'ernest.buikema@voys.nl', # 'ernest.buikema@voys.nl'
143: 'dennis.leenes@voys.nl', # 'dennis.leenes@voys.nl'
152: '', # 'lilyapi@voys.nl'
154: '', # 'janneke.vandervelde@voys.nl'
155: '', # 'flex@voys.nl'
156: '', # 'cornelis.poppema+voys@wearespindle.com'
157: 'jeroen.banus@voys.nl', # 'jeroen.banus@voys.nl'
158: 'arnoud.oosten@voys.nl', # 'arnoud.oosten@voys.nl'
159: '', # 'mark.vanderveen@voys.nl'
160: '', # 'eveline.welling@voys.nl'
162: '', # 'sjoerd@wearespindle.com'
168: 'marloes.vandervelde@voys.nl', # 'marloes.vandervelde@voys.nl'
169: 'wouter.koetze@voys.nl', # 'wouter.koetze@voys.nl'
226: '', # 'arjen@hellolily.com'
234: '', # 'luuk@wearespindle.com'
235: 'nina.morsa@voys.be', # 'nina.morsa@voys.be'
243: '', # 'bob@wearespindle.com'
246: '', # 'zoe.prevoo@voys.nl'
247: 'kirsten.beck@voys.nl', # 'kirsten.beck@voys.nl'
261: '', # 'brenda.kamminga@voys.nl'
262: 'wouter.brem@voys.nl', # 'wouter.brem@voys.nl'
264: '', # 'maureen.deputter@voys.nl'
268: '', # 'tycho.horn@voys.nl'
270: '', # 'redmer+voys@wearespindle.com'
272: '', # 'jonathan.vandenbroek@voys.nl'
279: '', # 'mattijs.jager@voys.nl'
282: '', # 'remi+voys@wearespindle.com'
283: '', # 'patrick.bruinsma@voys.nl'
284: 'irene.bottema@voys.nl', # 'irene.bottema@voys.nl'
324: '', # 'bianca.koenen@voys.nl'
472: '', # 'janpieter@voipgrid.nl'
478: '', # 'lydia.dejong@voys.nl'
534: 'sara.vanhecke@voys.be', # 'sara.vanhecke@voys.be'
553: '', # 'dennis.kloetstra@voys.nl'
637: 'yvanka.hullegie@voys.nl', # 'yvanka.hullegie@voys.nl'
680: 'rianne.plenter@voys.nl', # 'rianne.plenter@voys.nl'
734: 'pollien.vankeulen@voys.nl', # 'pollien.vankeulen@voys.nl'
750: 'wimke.hilvering@voys.nl', # 'wimke.hilvering@voys.nl'
751: 'johan.niemeijer@voys.nl', # 'johan.niemeijer@voys.nl'
781: 'rik.maris@voys.nl', # 'rik.maris@voys.nl'
847: 'niels.groenendaal@voys.nl', # 'niels.groenendaal@voys.nl'
850: '', # 'marjon.paasman@voys.nl'
865: 'peter.westerhof@voys.nl', # 'peter.westerhof@voys.nl'
867: '', # 'janbart.leeuw@voys.nl'
892: '', # 'rik.huijzer+voys@wearespindle.com'
904: '', # 'renske.tans@voys.nl'
968: 'maarten.vanbrussel@voys.nl', # 'maarten.vanbrussel@voys.nl'
996: '', # 'rudolf.michaelis@voys.nl'
997: 'maya.vanderschuit@voys.nl', # 'maya.vanderschuit@voys.nl'
1006: '', # 'marco.vellinga+voys@wearespindle.com'
1009: 'lisette.tigelaar@voys.nl', # 'lisette.tigelaar@voys.nl'
1064: '', # 'pascal.visser@voys.nl'
1094: 'nyna.vaneeks@voys.nl', # 'nyna.vaneeks@voys.nl'
1095: 'jetske.ouddeken@voys.nl', # 'jetske.ouddeken@voys.nl'
1098: '', # 'marloes.vandekamp@voys.nl'
1099: 'sander.bartelds@voys.nl', # 'sander.bartelds@voys.nl'
1105: 'anne.betting@voys.nl', # 'anne.betting@voys.nl'
1106: 'bart.sesselaar@voys.nl', # 'bart.sesselaar@voys.nl'
1107: 'nynke.kleinhorsman@voys.nl', # 'nynke.kleinhorsman@voys.nl'
1108: 'daniel.brouwer@voys.nl', # 'daniel.brouwer@voys.nl'
1112: 'jesse.mendezfonseca@voys.nl', # 'jesse.mendezfonseca@voys.nl'
1113: '', # 'joest.burema+deleted@voys.nl'
1114: 'joest.burema@voys.nl', # 'joest.burema@voys.nl'
1115: 'linda.hansen@voys.nl', # 'linda.hansen@voys.nl'
1116: '', # 'support@voipgrid.nl'
1117: 'leonie.vandepoll@voys.nl', # 'leonie.vandepoll@voys.nl'
1118: 'rick.bos@voys.nl', # 'rick.bos@voys.nl'
}
############################################################################
# ACCOUNT MAPPINGS
############################################################################
account_status_to_company_type_mapping = {
53: 'CUSTOMER', # 'Active'
54: 'OTHER', # 'Relation'
55: 'PROSPECT', # 'Prospect'
56: 'PAST_CUSTOMER', # 'Previous customer'
347: '', # 'Defaulter'
2615: 'PROSPECT', # 'Reseller prospect'
2616: 'RESELLER', # 'Reseller active'
2617: 'CUSTOMER', # 'Active through reseller'
2618: 'PROSPECT', # 'Prospect through reseller'
}
############################################################################
# CONTACT MAPPINGS
############################################################################
contact_status_to_contact_status_mapping = {
Contact.ACTIVE_STATUS: 'active',
Contact.INACTIVE_STATUS: 'inactive',
}
############################################################################
# CASE MAPPINGS
############################################################################
case_priority_to_ticket_priority_mapping = {
0: 'Low',
1: 'Medium',
2: 'High',
3: 'High', # Hubspot has no critical prio, so merge it with high.
}
case_pipeline = 'CH Voys EU'
case_status_to_ticket_stage_mapping = {
50: 'New', # 'New'
52: 'Waiting on customer', # 'Pending input'
53: 'Waiting on third party', # 'Waiting on hardware'
54: 'Waiting on customer', # 'Follow up'
55: 'Waiting on customer', # 'Client will contact us'
56: 'Closed', # 'Documentation'
58: 'Closed', # 'Closed'
}
case_type_to_ticket_category_mapping = {
1: 'Config and send', # 'Config and send'
2: '', # 'Support'
3: 'Retour', # 'Retour'
4: '', # 'Callback'
6: 'Incident', # 'Documentation'
7: 'Installation', # 'Installation'
9: 'Callback request', # 'Advice'
10: 'Incident', # 'Other'
11: '', # 'App'
12: '', # 'Cloud CTI'
13: '', # 'External nr unreachable'
14: '', # 'Freedom'
15: '', # 'Network related'
16: '', # 'One'
17: '', # 'Phone issue'
18: '', # 'PBX'
19: '', # 'Service interrupted'
2343: 'Dialplan', # 'User related'
2344: 'Incident', # 'Incident'
2345: 'Administration', # 'Administrative'
2346: 'Administration', # 'Bug'
2347: 'Administration', # 'Feature request'
}
############################################################################
# DEAL MAPPINGS
############################################################################
deal_pipeline = 'Voys EU'
deal_status_to_stage_mapping = {
31: 'New lead - unassigned', # 'Open'
32: 'Proposal sent', # 'Proposal sent'
33: 'Done', # 'Won'
34: 'Lost', # 'Lost'
35: 'Contact', # 'Called'
36: 'Request feedback', # 'Emailed'
}
deal_next_step_none_id = 4
deal_next_step_to_stage_mapping = {
1: 'Follow up', # 'Follow up'
2: 'Activate', # 'Activation'
3: 'Request feedback', # 'Feedback request'
5: 'Contact', # 'Contact'
385: 'Proposal viewed', # 'Viewed'
388: 'Porting', # 'Porting'
}
deal_found_through_to_found_through_mapping = {
41: 'Search engine', # 'Search engine'
42: 'Social Media', # 'Social media'
43: 'Talk with employee', # 'Talk with employee'
44: 'Existing customer', # 'Existing customer'
45: 'Other', # 'Other'
46: 'Radio', # 'Radio'
47: 'Public Speaking', # 'Public speaking'
48: 'Press and articles', # 'Press and articles'
4250: 'Middleman', # 'Middleman'
4361: 'Call Center', # 'Call Center'
5429: 'Chamber of Commerce', # 'Chamber of Commerce'
}
deal_contacted_by_to_contact_method_mapping = {
36: 'Contact form', # 'Quote'
37: 'Contact form', # 'Contact form'
38: 'Phone', # 'Phone'
39: 'Web chat', # 'Web chat'
40: 'Email', # 'E-mail'
41: 'Other/ unknown', # 'Instant connect'
42: 'Other/ unknown', # 'Other'
3079: 'Other/ unknown', # 'Cold calling'
}
deal_why_customer_to_won_reason_mapping = {
1: 'Replaces a mobile phone number', # 'Replaces a mobile phone number'
2: 'Not happy at current provider', # 'Not happy at current provider'
3: 'Current system ready for replacement', # 'Current system ready for replacement'
4: 'Start of a new company', # 'Start of a new company'
5: 'Company is moving', # 'Company is moving'
6: 'Change in organisation size', # 'Change in organisation size'
7: 'other', # 'Other'
8: 'other', # 'Unknown'
1589: 'ISDN stops', # 'ISDN stops'
}
deal_why_lost_to_lost_reason_mapping = {
1: 'OTHER', # 'Lost reason'
2: 'Want_unlimited_calling', # 'Wants unlimited calling'
3: 'NO_RESPONSE_FIRST_CONTACT', # 'No response to quote'
4: 'Slow_response_from_Voys', # 'Slow response from Voys'
5: 'CANT_LEAVE_PROVIDER', # 'Can't/won't leave current provider'
6: 'DONT_WANT_CUSTOMER', # 'We don't want this customer!'
7: 'NO_RESPONSE_PROPOSAL', # 'No response after inquiry'
8: 'MISSING_FEATURES', # 'Missing features'
9: 'Voys_One_customer', # 'Voys One customer'
10: 'OTHER', # 'Other'
12: 'TOO_EXPENSIVE', # 'Too expensive'
13: 'No_foreign_address', # 'No foreign address'
3305: 'WANTS_FLATFEE', # 'All-in-one package'
3306: 'Only_wanted_information', # 'Only wanted information'
3307: 'Choose_local_provider', # 'Chooses local provider'
3635: 'Hardware_installation_on_site', # 'Hardware installation on site'
4691: 'Chooses_different_provider', # 'Chooses different provider'
}
|
HelloLily/hellolily
|
lily/hubspot/tenant_mappings/tenant_50.py
|
Python
|
agpl-3.0
| 11,733
|
"""Tests for ``amici.pandas``"""
import itertools
import amici
import numpy as np
import pytest
# test parameters for test_pandas_import_export
combos = itertools.product(
[(10, 5), (5, 10), ()],
repeat=3
)
cases = [{
'fixedParameters': combo[0],
'fixedParametersPreequilibration': combo[1],
'fixedParametersPresimulation': combo[2],
} for combo in combos]
@pytest.mark.parametrize('case', cases)
def test_pandas_import_export(sbml_example_presimulation_module, case):
"""TestCase class for testing csv import using pandas"""
# setup
model = sbml_example_presimulation_module.getModel()
model.setTimepoints(np.linspace(0, 60, 61))
solver = model.getSolver()
rdata = amici.runAmiciSimulation(model, solver)
edata = [amici.ExpData(rdata, 0.01, 0)]
# test copy constructor
_ = amici.ExpData(edata[0])
for fp in case:
setattr(edata[0], fp, case[fp])
df_edata = amici.getDataObservablesAsDataFrame(model, edata)
edata_reconstructed = amici.getEdataFromDataFrame(model, df_edata)
for fp in ['fixedParameters', 'fixedParametersPreequilibration',
'fixedParametersPresimulation']:
if fp != 'fixedParameters' or case[fp] != ():
assert getattr(edata[0], fp) == getattr(edata_reconstructed[0], fp)
assert case[fp] == getattr(edata_reconstructed[0], fp)
else:
assert model.getFixedParameters() \
== getattr(edata_reconstructed[0], fp)
assert model.getFixedParameters() == \
getattr(edata_reconstructed[0], fp)
assert getattr(edata[0], fp) == case[fp]
|
AMICI-developer/AMICI
|
python/tests/test_pandas.py
|
Python
|
bsd-2-clause
| 1,653
|
"""
Diabicus: A calculator that plays music, lights up, and displays facts.
Copyright (C) 2016 Michael Lipschultz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from src import cases
from src.numeric_tools import *
class SpecialMusicCase:
"""
Class to hold a specific case for playing special music.
"""
def __init__(self, json_data):
self.json_data = json_data
self.raw_test = json_data.get('test')
self.filename = json_data.get('file')
self.link = json_data.get('link')
self.cite = json_data.get('cite')
self.start = json_data.get('start', 0)
self.end = json_data.get('end')
self.duration = json_data.get('duration')
self.weight = json_data.get('weight', 1)
try:
self.test = eval(self.raw_test)
except Exception:
self.test = False
if not callable(self.test):
self.test = lambda *args: False
if self.cite is None:
if self.link is not None:
self.cite = self.link
else:
self.cite = self.filename
#if self.duration < 0 or not os.path.exists(self.filename):
# self.test = lambda *args: False
def __str__(self):
return self.cite
def __repr__(self):
return str(self) + "{test : " + repr(self.raw_test.encode("unicode-escape")) + "}"
def load():
path = os.path.dirname(__file__)
return cases.load_json_cases(SpecialMusicCase, os.path.join(path, 'special_music.json'))
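# Hedged illustration (not from the original project): an entry in
# special_music.json is expected to provide the fields read above; the values
# here are invented for the example:
#
#     {
#         "test": "lambda *args: True",
#         "file": "audio/example.ogg",
#         "cite": "Example artist - Example song",
#         "start": 0,
#         "duration": 10
#     }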
|
lipschultz/diabicus
|
resources/special_music.py
|
Python
|
gpl-3.0
| 2,129
|
# coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
facebook_config = dict(
access_token_url='https://graph.facebook.com/v4.0/oauth/access_token',
api_base_url='https://graph.facebook.com/v4.0/',
authorize_url='https://www.facebook.com/v4.0/dialog/oauth',
client_id=config.CONFIG_DB.facebook_app_id,
client_secret=config.CONFIG_DB.facebook_app_secret,
request_token_params={'scope': 'email'},
)
facebook = auth.create_oauth_app(facebook_config, 'facebook')
@app.route('/api/auth/callback/facebook/')
def facebook_authorized():
id_token = facebook.authorize_access_token()
if id_token is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
me = facebook.get('/me?fields=id,name,email')
user_db = retrieve_user_from_facebook(me.json())
return auth.signin_user_db(user_db)
@app.route('/signin/facebook/')
def signin_facebook():
return auth.signin_oauth(facebook)
def retrieve_user_from_facebook(response):
auth_id = 'facebook_%s' % response['id']
user_db = model.User.get_by('auth_ids', auth_id)
name = response['name']
return user_db or auth.create_user_db(
auth_id=auth_id,
name=name,
username=name,
email=response.get('email', ''),
verified=bool(response.get('email', '')),
)
|
lipis/github-stats
|
main/auth/facebook.py
|
Python
|
mit
| 1,386
|
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import os
import math
import numpy as np
from osgeo import gdal
class RasterParameters(object):
def __init__(self, raster_x_size, raster_y_size, geo_trans, srs, number_of_bands, nodata, data_types,
driver_short_name=None):
self.RasterXSize = raster_x_size
self.RasterYSize = raster_y_size
self.geo_trans = geo_trans
self.srs = srs
self.number_of_bands = number_of_bands
self.nodata = self.check_value_length(nodata)
self.data_types = self.check_value_length(data_types)
self.driverShortName = driver_short_name
def __repr__(self):
from girs import srs
s = srs.srs_from_wkt(self.srs)
geocs = s.GetAttrValue('GEOGCS')
projcs = s.GetAttrValue('PROJCS')
srs = (geocs if geocs else '') + (projcs if projcs else '')
data_types = ','.join([str(gdal.GetDataTypeName(dt)) for dt in self.data_types])
return 'DIM[{}, {}, {}] ND{} DT[{}] DRV[{}] SRS[{}] TRANS{}'.format(
self.number_of_bands, self.RasterXSize, self.RasterYSize, self.nodata, data_types,
self.driverShortName, srs, self.geo_trans)
def get_coordinate_system(self):
return self.srs
def set_coordinate_system(self, srs):
self.srs = srs
def check_value_length(self, v):
n = self.number_of_bands
try:
if n < len(v):
v = v[:n]
elif n > len(v):
v = v[:-1] + [v[-1]] * (n - len(v) + 1)
except TypeError as te:
v = [v] * n
except:
raise
return v
def set_nodata(self, nodata):
self.nodata = self.check_value_length(nodata)
def pixel_size(self):
x_min, y_max = self.pixel_to_world(0, 0)
x_max, y_min = self.pixel_to_world(self.RasterXSize, self.RasterYSize)
return old_div((x_max - x_min),self.RasterXSize) , old_div((y_max-y_min),self.RasterYSize)
def pixel_to_world(self, x, y):
return self.geo_trans[0] + (x * self.geo_trans[1]), self.geo_trans[3] + (y * self.geo_trans[5])
def get_extent_world(self):
x_min0, y_max0 = self.pixel_to_world(0, 0)
x_max0, y_min0 = self.pixel_to_world(self.RasterXSize, self.RasterYSize)
return x_min0, x_max0, y_min0, y_max0
def world_to_pixel(self, x, y):
"""
:param x:
:param y:
:return:
"""
return int(math.floor(float(x - self.geo_trans[0]) / self.geo_trans[1])),\
int(math.floor(float(y - self.geo_trans[3]) / self.geo_trans[5]))
def extent_world_to_pixel(self, min_x, max_x, min_y, max_y):
u_min, v_min = self.world_to_pixel(min_x, max_y)
u_max, v_max = self.world_to_pixel(max_x, min_y)
geo_trans = list(self.geo_trans)
geo_trans[0], geo_trans[3] = self.pixel_to_world(u_min, v_min)
return (u_min, u_max, v_min, v_max), geo_trans
def set_driver_short_name(self, filename):
filename = filename.lower()
if filename == 'mem':
self.driverShortName = 'MEM'
else:
try:
self.driverShortName = gdal.IdentifyDriver(filename).ShortName
except AttributeError:
self.driverShortName = filename
def get_default_values(self, values):
try:
if self.number_of_bands < len(values):
values = values[:self.number_of_bands]
elif self.number_of_bands > len(values):
                # pad with the last value until one value per band is available
                values = values + [values[-1]] * (self.number_of_bands - len(values))
except TypeError:
values = [values] * self.number_of_bands
except:
raise
return values
def create_array(self, value=np.nan):
array = np.empty((self.number_of_bands, self.RasterYSize, self.RasterXSize))
for i in range(self.number_of_bands):
array[i] = np.empty((self.RasterYSize, self.RasterXSize)) * value
return array
def clip(self, x_min, x_max, y_min, y_max):
"""
:param x_min:
:param x_max:
:param y_min:
:param y_max:
:return:
"""
(u_min, u_max, v_min, v_max), geo_trans = self.extent_world_to_pixel(x_min, x_max, y_min, y_max)
return RasterParameters(u_max - u_min , v_max - v_min, geo_trans, self.srs, self.number_of_bands,
self.nodata, self.data_types, self.driverShortName)
def get_parameters(ds):
"""Return the raster parameters defined in the dataset
:param ds: dataset or filename
:type ds: gdal.Dataset
:return: raster parameters
:rtype: RasterParameters
"""
try:
if ds.endswith('.zip'):
ds = gdal.Open('/vsizip/' + ds + '/' + os.path.basename(ds[:-4]))
elif ds.endswith('.gz'):
ds = gdal.Open('/vsigzip/' + ds + '/' + os.path.basename(ds[:-3]))
else:
ds = gdal.Open(ds)
except Exception:
if isinstance(ds, RasterParameters):
return ds
xs = ds.RasterXSize
ys = ds.RasterYSize
gt = ds.GetGeoTransform()
rs = ds.GetProjection()
nb = ds.RasterCount
nd = [ds.GetRasterBand(i+1).GetNoDataValue() for i in range(ds.RasterCount)]
dt = [ds.GetRasterBand(i+1).DataType for i in range(ds.RasterCount)]
ds = ds.GetDriver().ShortName
return RasterParameters(xs, ys, gt, rs, nb, nd, dt, ds)
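# Worked example (added for illustration, values invented): with
# geo_trans = (440720.0, 30.0, 0.0, 3751320.0, 0.0, -30.0), pixel_to_world(0, 0)
# gives (440720.0, 3751320.0) and pixel_to_world(10, 10) gives
# (440720.0 + 10 * 30.0, 3751320.0 + 10 * -30.0) = (441020.0, 3751020.0).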
|
JRoehrig/GIRS
|
girs/rast/parameter.py
|
Python
|
mit
| 5,561
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
# python documentation says xml parsing is insecure, but this should be safe usage because we're
# just running it on code in our repositories, not on externally acquired data.
from xml.dom.minidom import parse
from pants.backend.codegen.targets.jaxb_library import JaxbLibrary
from pants.backend.codegen.tasks.code_gen import CodeGen
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.address import SyntheticAddress
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.java.distribution.distribution import Distribution
from pants.util.dirutil import safe_mkdir
class JaxbGen(CodeGen, NailgunTask):
"""Generates java source files from jaxb schema (.xsd)."""
_CONFIG_SECTION = 'jaxb-gen'
def __init__(self, *args, **kwargs):
"""
:param context: inherited parameter from Task
:param workdir: inherited parameter from Task
"""
super(JaxbGen, self).__init__(*args, **kwargs)
self.gen_langs = set()
lang = 'java'
if self.context.products.isrequired(lang):
self.gen_langs.add(lang)
self.jar_location = os.path.join(Distribution.cached().home, '..', 'lib', 'tools.jar')
@property
def config_section(self):
return self._CONFIG_SECTION
def _compile_schema(self, args):
classpath = [self.jar_location]
java_main = 'com.sun.tools.internal.xjc.Driver'
return self.runjava(classpath=classpath, main=java_main, args=args, workunit_name='xjc')
def is_forced(self, lang):
return lang in self.gen_langs
def is_gentarget(self, target):
return isinstance(target, JaxbLibrary)
def prepare_gen(self, target):
pass
def genlang(self, lang, targets):
if lang != 'java':
raise TaskError('Unrecognized jaxb language: %s' % lang)
output_dir = os.path.join(self.workdir, 'gen-java')
safe_mkdir(output_dir)
cache = []
for target in targets:
if not isinstance(target, JaxbLibrary):
raise TaskError('Invalid target type "{class_type}" (expected JaxbLibrary)'
.format(class_type=type(target).__name__))
target_files = []
for source in target.sources_relative_to_buildroot():
path_to_xsd = source
output_package = target.package
if output_package is None:
output_package = self._guess_package(source)
output_package = self._correct_package(output_package)
output_directory = output_dir
safe_mkdir(output_directory)
args = ['-p', output_package, '-d', output_directory, path_to_xsd]
result = self._compile_schema(args)
if result != 0:
raise TaskError('xjc ... exited non-zero ({code})'.format(code=result))
target_files.append(self._sources_to_be_generated(target.package, path_to_xsd))
cache.append((target, target_files))
return cache
def genlangs(self):
return {'java': lambda t: t.is_jvm}
def createtarget(self, lang, gentarget, dependees):
predicates = self.genlangs()
languages = predicates.keys()
if not (lang in languages) or not (predicates[lang](gentarget)):
raise TaskError('Invalid language "{lang}" for task {task}'
.format(lang=lang, task=type(self).__name__))
to_generate = []
for source in gentarget.sources_relative_to_buildroot():
to_generate.extend(self._sources_to_be_generated(gentarget.package, source))
spec_path = os.path.join(os.path.relpath(self.workdir, get_buildroot()), 'gen-java')
address = SyntheticAddress(spec_path=spec_path, target_name=gentarget.id)
target = self.context.add_new_target(
address,
JavaLibrary,
derived_from=gentarget,
sources=to_generate,
provides=gentarget.provides,
dependencies=[],
excludes=gentarget.payload.excludes
)
for dependee in dependees:
dependee.inject_dependency(target.address)
return target
@classmethod
def _guess_package(self, path):
"""Used in genlang to actually invoke the compiler with the proper arguments, and in
createtarget (via _sources_to_be_generated) to declare what the generated files will be.
"""
package = ''
slash = path.rfind(os.path.sep)
com = path.rfind(os.path.join('', 'com', ''))
if com < 0 and path.find(os.path.join('com', '')) == 0:
package = path[:slash]
elif com >= 0:
package = path[com:slash]
package = package.replace(os.path.sep, ' ')
package = package.strip().replace(' ', '.')
return package
@classmethod
def _correct_package(self, package):
package = package.replace('/', '.')
package = re.sub(r'^\.+', '', package)
package = re.sub(r'\.+$', '', package)
if re.search(r'\.{2,}', package) is not None:
raise ValueError('Package name cannot have consecutive periods! (%s)' % package)
return package
@classmethod
def _sources_to_be_generated(self, package, path):
"""This method (or some variation of it) seems to be common amongst all implementations of
code-generating tasks.
As far as I can tell, its purpose is to peek into the relevant schema files and figure out what
the final output files will be. This is typically implemented with a variety of hacks,
accompanied by TODO's saying to do it properly in the future (see apache_thrift_gen.py and
protobuf_gen.py). The implementation in this file does it 'properly' using python's xml parser,
though I am making some assumptions about how .xsd's are supposed to be formatted, as that is
not a subject I am particularly informed about.
"""
doc = parse(path)
if package is None:
package = self._guess_package(path)
package = self._correct_package(package)
names = []
for root in doc.childNodes:
if re.match('.*?:schema$', root.nodeName, re.I) is not None:
for element in root.childNodes:
if element.nodeName != '#text' and element.attributes.has_key('name'):
name = element.attributes['name'].nodeValue
if len(name) == 0: continue
# enforce pascal-case class names
name = name[0:1].upper() + name[1:]
names.append(name)
names.append('ObjectFactory')
outdir = package.replace('.', '/')
return [os.path.join(outdir, '%s.java' % name) for name in names]
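
  # --- Hedged worked example (added for illustration only) ---
  # For a hypothetical schema com/example/orders/orders.xsd that declares elements
  # named 'order' and 'lineItem', _sources_to_be_generated('com.example.orders', path)
  # would be expected to return:
  #   ['com/example/orders/Order.java',
  #    'com/example/orders/LineItem.java',
  #    'com/example/orders/ObjectFactory.java']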
|
tejal29/pants
|
src/python/pants/backend/codegen/tasks/jaxb_gen.py
|
Python
|
apache-2.0
| 6,748
|
# (C) 2017, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import unittest
import subprocess
import os
import sys
import pwd
import grp
import signal
import logging
import time
import glob
import psycopg2
basepath='/tmp/u_point_unittest'
if not os.path.exists(basepath):
    # make sure that it is writable (ssh user@host)
ret=os.makedirs(basepath, mode=0o0777)
logging.basicConfig(filename=os.path.join(basepath,'unittest.log'), level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()
class Args(object):
def __init__(self):
pass
class RTS2Environment(unittest.TestCase):
def tearDown(self):
processes=['rts2-centrald','rts2-executor', 'rts2-httpd','rts2-focusd-dummy','rts2-filterd-dummy', 'rts2-camd-dummy', 'rts2-teld-dummy']
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.decode("utf-8").splitlines():
# wildi 7432 0.0 0.1 24692 5192 pts/1 S 17:34 0:01 /usr/local/bin/rts2-centrald
itms= line.split()
exe= itms[10].split('/')[-1]
if self.uid in itms[0] and exe in processes:
pid = int(itms[1])
os.kill(pid, signal.SIGTERM)
        # remove the lock files
for fn in glob.glob(self.lockPrefix+'*'):
os.unlink (fn)
def setUp(self):
# by name
self.uid=pwd.getpwuid(os.getuid())[0]
self.gid= grp.getgrgid(os.getgid())[0]
# lock prefix
self.lockPrefix = '/tmp/rts2_{}'.format(self.uid)
# sometimes they are present
self.tearDown()
# set up RTS2
# rts2-centrald
cmd=[ '/usr/local/bin/rts2-centrald',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--local-port', '1617',
'--logfile', os.path.join(basepath,'rts2-debug'),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini'
]
self.p_centrald= subprocess.Popen(cmd)
# rts2-executor
cmd=[ '/usr/local/bin/rts2-executor',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'--noauth'
]
self.p_exec= subprocess.Popen(cmd)
# rts2-httpd
cmd=[ '/usr/local/bin/rts2-httpd',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'-p', '9999',
'--noauth',# seems not to work
]
self.p_httpd= subprocess.Popen(cmd)
# rts2-focusd-dummy
focName='F0'
cmd=[ '/usr/local/bin/rts2-focusd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', focName,
'--modefile', './f0.modefile',
#'--config', './rts2-unittest.ini'
]
self.p_focusd_dummy= subprocess.Popen(cmd)
# rts2-filterd-dummy
ftwnName='W0'
cmd=[ '/usr/local/bin/rts2-filterd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', ftwnName,
#'--config', './rts2-unittest.ini'
]
ftnames = 'U:B:V:R:I:H:X'
cmd.append('-F')
cmd.append(ftnames)
self.p_filterd_dummy= subprocess.Popen(cmd)
# rts2-camd-dummy
name='C0'
# '--wheeldev', 'W0', '--filter-offsets', '1:2:3:4:5:6:7'
cmd=[ '/usr/local/bin/rts2-camd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', name,
'--focdev', focName,
# not available
#'--config', './rts2-unittest.ini'
]
cmd.append('--wheeldev')
cmd.append(ftwnName)
cmd.append('--filter-offsets')
cmd.append('1:2:3:4:5:6:7')
self.p_camd_dummy= subprocess.Popen(cmd)
# rts2-teld-dummy
mntName='T0'
cmd=[ '/usr/local/bin/rts2-teld-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', mntName,
'--modefile', './t0.modefile',
#'--config', './rts2-unittest.ini'
]
self.p_teld_dummy= subprocess.Popen(cmd)
#
print('sleeping 10 sec')
time.sleep(10)
print('sleeping over')
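
# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal test case built on RTS2Environment, assuming the dummy devices above
# started successfully; Popen.poll() returning None means a process is still running.
class RTS2EnvironmentSketchTest(RTS2Environment):
    def test_devices_running(self):
        for proc in (self.p_centrald, self.p_exec, self.p_httpd,
                     self.p_focusd_dummy, self.p_filterd_dummy,
                     self.p_camd_dummy, self.p_teld_dummy):
            self.assertIsNone(proc.poll())

if __name__ == '__main__':
    unittest.main()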
|
RTS2/rts2
|
scripts/u_point/unittest/rts2_environment.py
|
Python
|
lgpl-3.0
| 5,725
|
'''
Created on Jul 31, 2014
@author: David Zwicker <dzwicker@seas.harvard.edu>
This package provides class definitions for referencing a single video file.
This code has been modified from the project moviepy, which is released under
the MIT license at github:
https://github.com/Zulko/moviepy/blob/master/moviepy/video/io
The MIT license text is included in the present package in the file
/external/LICENSE_MIT.txt
'''
from __future__ import division
import fcntl
import os
import re
import logging
import subprocess
import time
import numpy as np
from .base import VideoBase
from utils.data_structures import cache
logger = logging.getLogger('video.io')
# find the file handle to /dev/null to dump strings to
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
def get_ffmpeg_version(cmd):
""" helper function trying to get the version number from ffmpeg """
try:
# try getting help page from ffmpeg
output = subprocess.check_output([cmd, '-h'], stderr=subprocess.STDOUT)
# search for the version number and parse it
match = re.search("ffmpeg version (\d+)\.(\d+)", output)
version = tuple(int(match.group(k)) for k in xrange(1, 3))
except:
version = None
return version
# search for the FFmpeg command
FFMPEG_VERSION = get_ffmpeg_version('ffmpeg')
if FFMPEG_VERSION:
FFMPEG_BINARY = 'ffmpeg'
FFPROBE_BINARY = 'ffprobe'
logger.debug('Found ffmpeg v%s at %s',
'.'.join(str(i) for i in FFMPEG_VERSION),
subprocess.check_output(['which', 'ffmpeg']).strip())
else:
FFMPEG_VERSION = get_ffmpeg_version('ffmpeg.exe')
if FFMPEG_VERSION:
FFMPEG_BINARY = 'ffmpeg.exe'
FFPROBE_BINARY = 'ffprobe.exe'
logger.debug('Found ffmpeg.exe v%s.',
'.'.join(str(i) for i in FFMPEG_VERSION))
else:
FFMPEG_BINARY = None
FFPROBE_BINARY = None
logger.warn('ffmpeg binary not found. Functions relying on it will not '
'be available.')
class FFmpegError(IOError):
pass
class VideoFFmpeg(VideoBase):
""" Class handling a single movie file using FFmpeg
"""
#seek_max_frames = 100
seekable = True #< this video is seekable
parameters_default = {
'bufsize': None, #< buffer size for communicating with ffmpeg
'pix_fmt': 'rgb24', #< pixel format returned by ffmpeg
'video_info_method': 'header', #< method for estimating frame count
'ffprobe_cache': None, #< cache file for the ffprobe result
'reopen_delay': 0, #< seconds to wait before reopening a video
'seek_method': 'auto', #< method used for seeking
'seek_max_frames': 100, #< the maximal number of frames we seek through
'seek_offset': 1, #< seconds the rough seek is placed before the target
}
def __init__(self, filename, parameters=None):
""" initialize a video that will be read with FFmpeg
        `filename` is the name of the video file.
`parameters` denotes additional parameters
"""
self.parameters = self.parameters_default.copy()
if parameters:
self.parameters.update(parameters)
# get information about the video using FFmpeg
self.filename = os.path.expanduser(filename)
if self.parameters['video_info_method'] == 'header':
# use the information from the movie header
infos = ffmpeg_parse_infos(self.filename)
elif self.parameters['video_info_method'] == 'ffprobe':
# determine the information by iterating through the video
infos = ffprobe_get_infos(
self.filename,
cache_file=self.parameters['ffprobe_cache']
)
else:
raise ValueError('Unknown method `%s` for determining information '
'about the video'
% self.parameters['video_info_method'])
# store information in class
self.duration = infos['video_duration']
self.infos = infos
self.pix_fmt = self.parameters['pix_fmt']
if self.pix_fmt == 'rgba':
self.depth = 4
elif self.pix_fmt == 'rgb24':
self.depth = 3
else:
raise ValueError('Unsupported pixel format `%s`' % self.pix_fmt)
if self.parameters['bufsize'] is None:
w, h = infos['video_size']
bufsize = 2 * self.depth * w * h + 100 #< add some safety margin
# initialize the process that eventually reads the video
self.bufsize = bufsize
self.proc = None
self.open()
self.lastread = None
super(VideoFFmpeg, self).__init__(size=tuple(infos['video_size']),
frame_count=infos['video_nframes'],
fps=infos['video_fps'], is_color=True)
logger.debug('Initialized video `%s` with %d frames using FFmpeg',
self.filename, infos['video_nframes'])
def print_infos(self):
""" print information about the video file """
if self.parameters['video_info_method'] == 'header':
ffmpeg_parse_infos(self.filename, print_infos=True)
elif self.parameters['video_info_method'] == 'ffprobe':
print(self.infos)
else:
raise ValueError('Unknown method `%s` for determining information '
'about the video'
% self.parameters['video_info_method'])
@property
def closed(self):
return self.proc is None
def open(self, index=0):
""" Opens the file, creates the pipe. """
logger.debug('Open video `%s`' % self.filename)
# close video if it was opened
if not self.closed:
self.close()
# wait some time until we reopen the video
reopen_delay = self.parameters['reopen_delay']
if reopen_delay > 0:
logger.debug('Wait %g seconds before reopening video',
reopen_delay)
time.sleep(reopen_delay)
if index > 0:
# we have to seek to another index/time
# determine the time that we have to seek to
# the -0.1 is necessary to prevent rounding errors
starttime = (index - 0.1) / self.fps
# determine which method to use for seeking
seek_method = self.parameters['seek_method']
if seek_method == 'auto':
if FFMPEG_VERSION > (2, 1):
seek_method = 'exact'
else:
seek_method = 'keyframe'
if seek_method == 'exact':
# newer ffmpeg version, which supports accurate seeking
i_arg = ['-ss', "%.03f" % starttime,
'-i', self.filename]
elif seek_method == 'keyframe':
# older ffmpeg version, which does not support accurate seeking
if index < self.parameters['seek_max_frames']:
# we only have to seek a little bit
i_arg = ['-i', self.filename,
'-ss', "%.03f" % starttime]
else:
# we first seek to a keyframe and then proceed from there
seek_offset = self.parameters['seek_offset']
i_arg = ['-ss', "%.03f" % (starttime - seek_offset),
'-i', self.filename,
'-ss', "%.03f" % seek_offset]
else:
raise ValueError('Unknown seek method `%s`' % seek_method)
logger.debug('Seek video to frame %d (=%.03fs)', index, starttime)
else:
# we can just open the video at the first frame
i_arg = ['-i', self.filename]
# build ffmpeg command line
cmd = ([FFMPEG_BINARY] +
i_arg +
['-loglevel', 'error',
'-f', 'image2pipe',
'-pix_fmt', self.pix_fmt,
'-vcodec', 'rawvideo',
'-'])
self.proc = subprocess.Popen(cmd, bufsize=self.bufsize,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# set the stderr to non-blocking; used the idea from
# http://stackoverflow.com/a/8980466/932593
# this only works on UNIX!
fcntl.fcntl(self.proc.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
self._frame_pos = index
def set_frame_pos(self, index):
""" sets the video to position index """
if index < 0:
index += self.frame_count
if index != self._frame_pos:
# determine the farthest frame that we would reach by skipping
max_future_frame = self._frame_pos + \
self.parameters['seek_max_frames']
if (index < self._frame_pos) or (index > max_future_frame):
# reopen the video at the correct position
self.open(index)
else:
# skip frames to reach the requested position
skip_frames = index - self._frame_pos
w, h = self.size
for _ in xrange(skip_frames):
self.proc.stdout.read(self.depth*w*h)
self.proc.stdout.flush()
self._frame_pos = index
def get_next_frame(self):
""" retrieve the next frame from the video """
# read standard error output and log it if requested
try:
stderr_content = self.proc.stderr.read()
except IOError:
# nothing to read from stderr
pass
else:
logger.debug(stderr_content)
w, h = self.size
nbytes = self.depth*w*h
# read the next frame from the process
s = self.proc.stdout.read(nbytes)
if len(s) != nbytes:
# frame_count is a rather crude estimate of the length of the video.
# We thus stop the iteration, when we think we are close to the end.
# The magic numbers 5 and 0.01 are rather arbitrary, but have proven
# to work in most practical cases.
frames_remaining = self.frame_count - self._frame_pos
if frames_remaining < 5 or frames_remaining < 0.01*self.frame_count:
raise StopIteration
logger.warn("Warning: in file %s, %d bytes wanted but %d bytes "
"read, at frame %d/%d, at time %.02f/%.02f sec. "
"Using the last valid frame instead." %
(self.filename, nbytes, len(s),
self._frame_pos, self.frame_count,
self._frame_pos/self.fps, self.duration))
if self.lastread is None:
raise FFmpegError(
"Failed to read the first frame of video file %s. That "
"might mean that the file is corrupted. That may also mean "
"that your version of FFmpeg (%s) is too old."
% (self.filename, '.'.join(str(v) for v in FFMPEG_VERSION))
)
result = self.lastread
else:
# frame has been obtained properly
shape = (h, w, self.depth)
result = np.frombuffer(s, dtype='uint8').reshape(shape)
self.lastread = result
self._frame_pos += 1
return result
def get_frame(self, index):
""" Read a file video frame at time t.
Note for coders: getting an arbitrary frame in the video with
FFmpeg can be painfully slow if some decoding has to be done.
This function tries to avoid fetching arbitrary frames
whenever possible, by moving between adjacent frames.
"""
if index < 0:
index += self.frame_count
if index == self._frame_pos - 1:
return self.lastread
else:
self.set_frame_pos(index)
result = self.get_next_frame()
assert self._frame_pos == index + 1
return result
def close(self):
""" close the process reading the video """
if self.proc is not None:
try:
self.proc.terminate()
self.proc.stdout.close()
self.proc.stderr.close()
self.proc.wait()
except IOError:
pass
self.proc = None
def __enter__(self):
return self
def __exit__(self, e_type, e_value, e_traceback):
self.close()
def __del__(self):
self.close()
if hasattr(self, 'lastread'):
del self.lastread
class VideoWriterFFmpeg(object):
""" A class for FFmpeg-based video writing.
A class to write videos using FFmpeg. FFmpeg will write in a large
choice of formats.
Parameters
-----------
filename
Any filename like 'video.mp4' etc. but if you want to avoid
complications it is recommended to use the generic extension
'.avi' for all your videos.
size
Size (width, height) of the output video in pixels.
fps
Frames per second in the output video file.
codec
FFmpeg codec. It seems that in terms of quality the hierarchy is
'rawvideo' = 'png' > 'mpeg4' > 'libx264'
'png' manages the same lossless quality as 'rawvideo' but yields
smaller files. Type ``FFmpeg -codecs`` in a terminal to get a list
of accepted codecs.
Note for default 'libx264': by default the pixel format yuv420p
is used. If the video dimensions are not both even (e.g. 720x405)
another pixel format is used, and this can cause problem in some
video readers.
bitrate
Only relevant for codecs which accept a bitrate. "5000k" offers
nice results in general.
"""
def __init__(self, filename, size, fps, is_color=True, codec="libx264",
bitrate=None):
"""
Initializes the video writer.
`filename` is the name of the video
`size` is a tuple determining the width and height of the video
`fps` determines the frame rate in 1/seconds
`is_color` is a flag indicating whether the video is in color
`codec` selects a codec supported by FFmpeg
`bitrate` determines the associated bitrate
"""
self.filename = os.path.expanduser(filename)
self.codec = codec
self.ext = self.filename.split(".")[-1]
self.size = size
self.is_color = is_color
self.frames_written = 0
if size[0] % 2 != 0 or size[1] % 2 != 0:
raise ValueError('Both dimensions of the video must be even for '
'the video codec to work properly')
# determine whether we are in debug mode
debug = (logger.getEffectiveLevel() >= logging.DEBUG)
#FIXME: consider adding the flags
# "-f ismv" "-movflags frag_keyframe"
# to avoid corrupted mov files, if movie writing is interrupted
# build the FFmpeg command
cmd = (
[FFMPEG_BINARY, '-y',
'-loglevel', 'verbose' if debug else 'error',
'-threads', '1', #< single threaded encoding for safety
'-f', 'rawvideo',
'-vcodec','rawvideo',
'-s', "%dx%d" % tuple(size),
'-pix_fmt', 'rgb24' if is_color else 'gray',
'-r', '%.02f' % fps,
'-i', '-',
'-an'] # no audio
+ ([] if (codec is None) else ['-c:v', codec])
+ ([] if (bitrate is None) else ['-b:v', bitrate])
# http://trac.FFmpeg.org/ticket/658
+ (['-pix_fmt', 'yuv420p']
if ((codec == 'libx264') and
(size[0] % 2 == 0) and
(size[1] % 2 == 0))
else [])
+ ['-r', "%.02f" % fps, filename]
)
# estimate the buffer size with some safety margins
depth = 3 if is_color else 1
bufsize = 2 * depth * size[0] * size[1] + 100
# start FFmpeg, which should wait for input
self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=DEVNULL, stderr=subprocess.PIPE,
bufsize=bufsize)
# set the stderr to non-blocking; used the idea from
# http://stackoverflow.com/a/8980466/932593
# this only works on UNIX!
fcntl.fcntl(self.proc.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
logger.info('Start writing video `%s` with codec `%s` using FFmpeg.',
filename, codec)
@property
def shape(self):
""" returns the shape of the data describing the movie """
shape = (self.size[1], self.size[0])
if self.is_color:
shape += (3,)
return shape
def write_frame(self, img_array):
""" Writes a single frame in the file """
img_array = img_array.astype(np.uint8, copy=False)
if self.is_color and img_array.ndim == 2:
img_array = img_array[:, :, None]*np.ones((1, 1, 3), np.uint8)
try:
self.proc.stdin.write(img_array.tostring())
except IOError as err:
FFmpeg_error = self.proc.stderr.read()
error = (str(err) +
"\n\nFFmpeg encountered the following error while "
"writing file %s:\n\n" % self.filename +
FFmpeg_error)
if "Unknown encoder" in FFmpeg_error:
error = error + ("\n\nThe video export "
"failed because FFmpeg didn't find the specified "
"codec for video encoding (%s). Please install "
"this codec or use a different codec") % (self.codec)
elif "incorrect codec parameters ?" in FFmpeg_error:
error = error + ("\n\nThe video export "
"failed, possibly because the codec specified for "
"the video (%s) is not compatible with the given "
"extension (%s). Please specify a valid 'codec' "
"argument in write_videofile. This would be 'libx264' "
"or 'mpeg4' for mp4, 'libtheora' for ogv, 'libvpx' "
"for webm.") % (self.codec, self.ext)
elif "encoder setup failed" in FFmpeg_error:
error = error + ("\n\nThe video export "
"failed, possibly because the bitrate you specified "
"was too high or too low for the video codec.")
# add parameters of the video for additional information
error += "\nVideo: {size} {color}\nCodec: {codec}\n".format(
**{'size': ' x '.join(str(v) for v in self.size),
'color': 'color' if self.is_color else 'monochrome',
'codec': self.codec})
raise FFmpegError(error)
else:
self.frames_written += 1
# read standard error output and log it if requested
try:
stderr_content = self.proc.stderr.read()
except IOError:
# nothing to read from stderr
pass
else:
logger.debug(stderr_content)
def close(self):
""" finishes the process, which should also make the video available """
if self.proc is not None:
try:
self.proc.communicate()
except IOError:
pass
logger.info('Wrote video to file `%s`', self.filename)
self.proc = None
def __enter__(self):
return self
def __exit__(self, e_type, e_value, e_traceback):
self.close()
def __del__(self):
self.close()
def ffmpeg_parse_infos(filename, print_infos=False):
"""Get file information using FFmpeg.
Returns a dictionary with the fields:
"video_found", "video_fps", "duration", "video_nframes",
"video_duration"
"video_duration" is slightly smaller than "duration" to avoid
fetching the uncompleted frames at the end, which raises an error.
"""
# open the file in a pipe, provoke an error, read output
is_GIF = filename.endswith('.gif')
cmd = [FFMPEG_BINARY, "-i", filename]
if is_GIF:
cmd += ["-f", "null", "/dev/null"]
proc = subprocess.Popen(cmd,
bufsize=10**5,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.stdout.readline()
proc.terminate()
infos = proc.stderr.read().decode('utf8')
del proc
if print_infos:
# print the whole info text returned by FFmpeg
print(infos)
lines = infos.splitlines()
if "No such file or directory" in lines[-1]:
raise IOError("The file %s could not be found!\n"
"Please check that you entered the correct path.\n"
"Here are the file information returned by FFmpeg:\n\n%s"
% (filename, infos))
# get duration (in seconds)
result = {}
try:
keyword = ('frame=' if is_GIF else 'Duration: ')
line = [l for l in lines if keyword in l][0]
match = re.findall("([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])",
line)[0]
result['duration'] = time_to_seconds(match)
except:
raise IOError("Failed to read the duration of file %s.\n"
"Here are the file information returned by FFmpeg:\n\n%s"
% (filename, infos))
# get the output line that speaks about video
lines_video = [l for l in lines if ' Video: ' in l]
result['video_found'] = bool(lines_video)
if result['video_found']:
line = lines_video[0]
# get the size, of the form 460x320 (w x h)
match = re.search(" [0-9]*x[0-9]*(,| )", line)
s = list(map(int, line[match.start():match.end()-1].split('x')))
result['video_size'] = s
# get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
# tbc, and sometimes tbc/2...
        # Current policy: trust tbr first, then fps. If the result is close to
        # x*1000/1001 where x is one of 23, 24, 25, 30, 50 or 60, replace it by
        # x*1000/1001 (a very common case for the fps).
try:
match = re.search("( [0-9]*.| )[0-9]* tbr", line)
tbr = float(line[match.start():match.end()].split(' ')[1])
result['video_fps'] = tbr
except:
match = re.search("( [0-9]*.| )[0-9]* fps", line)
substr = line[match.start():match.end()]
result['video_fps'] = float(substr.split(' ')[1])
# It is known that a fps of 24 is often written as 24000/1001
# but then FFmpeg nicely rounds it to 23.98, which we hate.
coef = 1000.0/1001.0
fps = result['video_fps']
for x in [23, 24, 25, 30, 50, 60]:
if (fps != x) and abs(fps - x*coef) < .01:
result['video_fps'] = x*coef
result['video_nframes'] = int(result['duration']*result['video_fps']) + 1
result['video_duration'] = result['duration']
# We could have also recomputed the duration from the number
# of frames, as follows:
# >>> result['video_duration'] = result['video_nframes'] / result['video_fps']
return result
def ffprobe_get_infos(video_file, print_infos=False, cache_file=None):
"""Get file information using ffprobe, which iterates through the video.
Returns a dictionary with the fields:
"video_found", "video_fps", "duration", "video_nframes",
"video_duration"
"video_duration" is slightly smaller than "duration" to avoid
fetching the uncompleted frames at the end, which raises an error.
"""
import json
# prepare program call
cmd = [FFPROBE_BINARY,
'-i', video_file,
'-print_format', 'json',
'-loglevel', 'error',
'-show_streams', '-count_frames',
'-select_streams', 'v']
if cache_file:
# load the cache of all the ffprobe calls
ffprobe_cache = cache.PersistentDict(cache_file)
try:
# try to fetch the output from this cache
output = ffprobe_cache[video_file]
except KeyError:
# the videofile was not yet processed
logger.info('Determining information by iterating through video '
'`%s` and store it in cache `%s`',
video_file, cache_file)
# run ffprobe and fetch its output from the command line
output = subprocess.check_output(cmd)
# store result in the cache
ffprobe_cache[video_file] = output
else:
logger.info('Loaded information about video `%s` from cache `%s`',
video_file, cache_file)
else:
# run ffprobe and fetch its output from the command line
logger.info('Determining information by iterating through video '
'`%s`' % video_file)
output = subprocess.check_output(cmd)
# parse the json output
infos = json.loads(output)
if print_infos:
print(infos)
# select the first stream
infos = infos["streams"][0]
# add synonyms
try:
fps_e, fps_d = infos['r_frame_rate'].split('/')
infos['video_size'] = (int(infos['width']), int(infos['height']))
infos['video_fps'] = float(fps_e) / float(fps_d)
infos['video_nframes'] = int(infos['nb_read_frames'])
except KeyError:
logger.error('Video information did not have the expected format.\n'
'The following information was obtained:\n%s', infos)
try:
infos['video_duration'] = float(infos['duration'])
except KeyError:
infos['video_duration'] = infos['video_nframes'] / infos['video_fps']
return infos
def time_to_seconds(time):
""" Will convert any time into seconds.
Here are the accepted formats:
>>> time_to_seconds(15.4) -> 15.4 # seconds
>>> time_to_seconds((1, 21.5)) -> 81.5 # (min,sec)
>>> time_to_seconds((1, 1, 2)) -> 3662 # (hr, min, sec)
>>> time_to_seconds('01:01:33.5') -> 3693.5 #(hr,min,sec)
>>> time_to_seconds('01:01:33.045') -> 3693.045
>>> time_to_seconds('01:01:33,5') # comma works too
"""
if isinstance(time, basestring):
if (',' not in time) and ('.' not in time):
time = time + '.0'
expr = r"(\d+):(\d+):(\d+)[,|.](\d+)"
finds = re.findall(expr, time)[0]
nums = list( map(float, finds) )
return (3600*int(finds[0])
+ 60*int(finds[1])
+ int(finds[2])
+ nums[3]/(10**len(finds[3])))
elif isinstance(time, tuple):
if len(time) == 3:
hr, mn, sec = time
elif len(time) == 2:
hr, mn, sec = 0, time[0], time[1]
return 3600*hr + 60*mn + sec
else:
return time
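
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assuming ffmpeg is on the PATH and 'movie.avi' is an existing video with even
# frame dimensions (the file names are hypothetical), copying a few frames could
# look roughly like this:
if __name__ == '__main__':
    video = VideoFFmpeg('movie.avi')
    with VideoWriterFFmpeg('copy.avi', size=video.size, fps=video.fps) as writer:
        for _ in range(min(10, video.frame_count)):
            writer.write_frame(video.get_next_frame())
    video.close()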
|
david-zwicker/video-analysis
|
video/io/backend_ffmpeg.py
|
Python
|
bsd-3-clause
| 28,074
|
from featuretools.entityset.relationship import Relationship, RelationshipPath
def test_relationship_path(es):
log_to_sessions = Relationship(es['sessions']['id'],
es['log']['session_id'])
sessions_to_customers = Relationship(es['customers']['id'],
es['sessions']['customer_id'])
path_list = [(True, log_to_sessions),
(True, sessions_to_customers),
(False, sessions_to_customers)]
path = RelationshipPath(path_list)
for i, edge in enumerate(path_list):
assert path[i] == edge
assert [edge for edge in path] == path_list
def test_relationship_path_name(es):
assert RelationshipPath([]).name == ''
log_to_sessions = Relationship(es['sessions']['id'],
es['log']['session_id'])
sessions_to_customers = Relationship(es['customers']['id'],
es['sessions']['customer_id'])
forward_path = [(True, log_to_sessions), (True, sessions_to_customers)]
assert RelationshipPath(forward_path).name == 'sessions.customers'
backward_path = [(False, sessions_to_customers), (False, log_to_sessions)]
assert RelationshipPath(backward_path).name == 'sessions.log'
mixed_path = [(True, log_to_sessions), (False, log_to_sessions)]
assert RelationshipPath(mixed_path).name == 'sessions.log'
def test_relationship_path_entities(es):
assert list(RelationshipPath([]).entities()) == []
log_to_sessions = Relationship(es['sessions']['id'],
es['log']['session_id'])
sessions_to_customers = Relationship(es['customers']['id'],
es['sessions']['customer_id'])
forward_path = [(True, log_to_sessions), (True, sessions_to_customers)]
assert list(RelationshipPath(forward_path).entities()) == ['log', 'sessions', 'customers']
backward_path = [(False, sessions_to_customers), (False, log_to_sessions)]
assert list(RelationshipPath(backward_path).entities()) == ['customers', 'sessions', 'log']
mixed_path = [(True, log_to_sessions), (False, log_to_sessions)]
assert list(RelationshipPath(mixed_path).entities()) == ['log', 'sessions', 'log']
def test_names_when_multiple_relationships_between_entities(games_es):
relationship = Relationship(games_es['teams']['id'],
games_es['games']['home_team_id'])
assert relationship.child_name == 'games[home_team_id]'
assert relationship.parent_name == 'teams[home_team_id]'
def test_names_when_no_other_relationship_between_entities(home_games_es):
relationship = Relationship(home_games_es['teams']['id'],
home_games_es['games']['home_team_id'])
assert relationship.child_name == 'games'
assert relationship.parent_name == 'teams'
def test_relationship_serialization(es):
relationship = Relationship(es['sessions']['id'], es['log']['session_id'])
dictionary = {
'parent_entity_id': 'sessions',
'parent_variable_id': 'id',
'child_entity_id': 'log',
'child_variable_id': 'session_id',
}
assert relationship.to_dictionary() == dictionary
assert Relationship.from_dictionary(dictionary, es) == relationship
|
Featuretools/featuretools
|
featuretools/tests/entityset_tests/test_relationship.py
|
Python
|
bsd-3-clause
| 3,335
|
# -*- coding: UTF-8 -*-
#
# The MIT License
#
# Copyright (c) 2009-2012 Felix Schwarz <felix.schwarz@oss.schwarz.eu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import copy
import inspect
import sys
import types
import warnings
from pycerberus.compat import reversed, set
from pycerberus.errors import EmptyError, InvalidArgumentsError, InvalidDataError, \
ThreadSafetyError
from pycerberus.i18n import _, GettextTranslation
from pycerberus.lib import SuperProxy
__all__ = ['BaseValidator', 'Validator']
class NoValueSet(object):
pass
class EarlyBindForMethods(type):
super = SuperProxy()
def __new__(cls, classname, direct_superclasses, class_attributes_dict):
validator_class = type.__new__(cls, classname, direct_superclasses, class_attributes_dict)
cls._simulate_early_binding_for_message_methods(validator_class)
return validator_class
def _simulate_early_binding_for_message_methods(cls, validator_class):
# Need to create a dynamic method if messages are defined in a
# class-level dict.
if not callable(validator_class.messages):
messages_dict = validator_class.messages.copy()
def messages(self):
return messages_dict
validator_class.messages = messages
# We need to simulate 'early binding' so that we can reference the
# messages() method which is defined in the class to be created!
def keys(self):
return validator_class.messages(self)
# make sphinx happy
keys.__doc__ = validator_class.keys.__doc__
validator_class.keys = keys
if validator_class.__name__ == 'BaseValidator' or \
getattr(validator_class.message_for_key, 'autogenerated', False):
def message_for_key(self, key, context):
return validator_class.messages(self)[key]
message_for_key.autogenerated = True
# make sphinx happy
message_for_key.__doc__ = validator_class.message_for_key.__doc__
validator_class.message_for_key = message_for_key
_simulate_early_binding_for_message_methods = classmethod(_simulate_early_binding_for_message_methods)
class BaseValidator(object):
"""The BaseValidator implements only the minimally required methods.
Therefore it does not put many constraints on you. Most users probably want
to use the ``Validator`` class which already implements some commonly used
features.
You can pass ``messages`` a dict of messages during instantiation to
overwrite messages specified in the validator without the need to create
a subclass."""
__metaclass__ = EarlyBindForMethods
super = SuperProxy()
def __init__(self, messages=None):
if not messages:
return
old_messages = self.messages
old_message_for_key = self.message_for_key
def messages_(self):
all_messages = old_messages()
all_messages.update(messages)
return all_messages
def message_for_key(self, key, context):
if key in messages:
return messages[key]
return old_message_for_key(key, context)
self.messages = self._new_instancemethod(messages_)
self.message_for_key = self._new_instancemethod(message_for_key)
def _new_instancemethod(self, method):
if sys.version_info < (3,):
return types.MethodType(method, self, self.__class__)
return types.MethodType(method, self)
def messages(self):
"""Return all messages which are defined by this validator as a
key/message dictionary. Alternatively you can create a class-level
dictionary which contains these keys/messages.
You must declare all your messages here so that all keys are known
after this method was called.
Calling this method might be costly when you have a lot of messages and
returning them is expensive. You can reduce the overhead in some
situations by implementing ``message_for_key()``"""
return {}
def copy(self):
"""Return a copy of this instance."""
clone = copy.copy(self)
was_frozen = False
if hasattr(clone, 'is_internal_state_frozen'):
was_frozen = clone.is_internal_state_frozen()
clone.set_internal_state_freeze(False)
        # copy.copy only copies instance-level attributes, but we also need to copy
        # class-level attributes to support the declarative syntax properly.
# I did not want to add more metaclass magic (that's already complicated
# enough).
klass = self.__class__
for name in dir(clone):
if name in ('__dict__', '__doc__', '__module__', '__slotnames__',
'__weakref__', 'super'):
continue
elif not hasattr(klass, name):
# this is an instance-specific attribute/method, already copied
continue
clone_value = getattr(clone, name)
klass_value = getattr(klass, name)
if id(clone_value) != id(klass_value):
continue
if name.startswith('__') and callable(clone_value):
continue
elif inspect.isroutine(clone_value):
continue
if hasattr(clone_value, 'copy'):
copied_value = clone_value.copy()
else:
copied_value = copy.copy(clone_value)
setattr(clone, name, copied_value)
if was_frozen:
clone.set_internal_state_freeze(True)
return clone
def message_for_key(self, key, context):
"""Return a message for a specific key. Implement this method if you
want to avoid calls to messages() which might be costly (otherwise
implementing this method is optional)."""
raise NotImplementedError('message_for_key() should have been replaced by a metaclass')
def keys(self):
"""Return all keys defined by this specific validator class."""
raise NotImplementedError('keys() should have been replaced by a metaclass')
def raise_error(self, key, value, context, errorclass=InvalidDataError, **values):
"""Raise an InvalidDataError for the given key."""
msg_template = self.message_for_key(key, context)
raise errorclass(msg_template % values, value, key=key, context=context)
def error(self, *args, **kwargs):
warnings.warn("BaseValidator.error() is deprecated. Please use 'raise_error' instead!", DeprecationWarning)
self.raise_error(*args, **kwargs)
def process(self, value, context=None):
"""This is the method to validate your input. The validator returns a
(Python) representation of the given input ``value``.
In case of errors a ``InvalidDataError`` is thrown."""
return value
def revert_conversion(self, value, context=None):
"""Undo the conversion of ``process()`` and return a "string-like"
representation. This method is especially useful for widget libraries
like ToscaWigets so they can render Python data types in a human
readable way.
The returned value does not have to be an actual Python string as long
as it has a meaningful unicode() result. Generally the validator
should accept the return value in its '.process()' method."""
if value is None:
return None
return unicode(value)
def to_string(self, *args, **kwargs):
warnings.warn("BaseValidator.to_string() is deprecated. Please use 'revert_conversion' instead!", DeprecationWarning)
self.revert_conversion(*args, **kwargs)
class Validator(BaseValidator):
"""The Validator is the base class of most validators and implements
some commonly used features like required values (raise exception if no
value was provided) or default values in case no value is given.
This validator splits conversion and validation into two separate steps:
When a value is ``process()``ed, the validator first calls ``convert()``
which performs some checks on the value and eventually returns the converted
value. Only if the value was converted correctly, the ``validate()``
function can do additional checks on the converted value and possibly raise
an Exception in case of errors. If you only want to do additional checks
(but no conversion) in your validator, you can implement ``validate()`` and
simply assume that you get the correct Python type (e.g. int).
Of course if you can also raise a ``ValidationError`` inside of ``convert()`` -
often errors can only be detected during the conversion process.
By default, a validator will raise an ``InvalidDataError`` if no value was
given (unless you set a default value). If ``required`` is False, the
default is None. All exceptions thrown by validators must be derived from
``ValidationError``. Exceptions caused by invalid user input should use
``InvalidDataError`` or one of the subclasses.
If ``strip`` is True (default is False) and the input value has a ``strip()``
method, the input will be stripped before it is tested for empty values and
passed to the ``convert()``/``validate()`` methods.
In order to prevent programmer errors, an exception will be raised if
you set ``required`` to True but provide a default value as well.
"""
def __init__(self, default=NoValueSet, required=NoValueSet, strip=False, messages=None):
self.super(messages=messages)
self._default = default
self._required = required
self._check_argument_consistency()
self._strip_input = strip
self._implementations, self._implementation_by_class = self._freeze_implementations_for_class()
if self.is_internal_state_frozen() not in (True, False):
self._is_internal_state_frozen = True
# --------------------------------------------------------------------------
# initialization
def _check_argument_consistency(self):
if self.is_required(set_explicitely=True) and self._has_default_value_set():
msg = 'Set default value (%s) has no effect because a value is required.' % repr(self._default)
raise InvalidArgumentsError(msg)
def _has_default_value_set(self):
return (self._default is not NoValueSet)
def _freeze_implementations_for_class(self):
class_for_key = {}
implementations_for_class = {}
known_functions = set()
for cls in reversed(inspect.getmro(self.__class__)):
if not self._class_defines_custom_keys(cls, known_functions):
continue
defined_keys = cls.keys(self)
if cls == self.__class__:
cls = self
defined_keys = self.keys()
known_functions.add(cls.keys)
for key in defined_keys:
class_for_key[key] = self._implementations_by_key(cls)
implementations_for_class[cls] = class_for_key[key]
return class_for_key, implementations_for_class
def _implementations_by_key(self, cls):
implementations_by_key = dict()
for name in ['translation_parameters', 'keys', 'message_for_key', 'translate_message']:
implementations_by_key[name] = getattr(cls, name)
return implementations_by_key
def _class_defines_custom_keys(self, cls, known_functions):
return hasattr(cls, 'keys') and cls.keys not in known_functions
# --------------------------------------------------------------------------
# Implementation of BaseValidator API
def messages(self):
return {'empty': _('Value must not be empty.')}
def exception(self, key, value, context, errorclass=InvalidDataError,
error_dict=None, error_list=(), **values):
translated_message = self.message(key, context, **values)
return errorclass(translated_message, value, key=key, context=context,
error_dict=error_dict, error_list=error_list)
def raise_error(self, key, value, context, errorclass=InvalidDataError,
error_dict=None, error_list=(), **values):
raise self.exception(key, value, context, errorclass=errorclass,
error_dict=error_dict, error_list=error_list, **values)
def process(self, value, context=None):
if context is None:
context = {}
if self._strip_input and hasattr(value, 'strip'):
value = value.strip()
value = super(Validator, self).process(value, context)
if self.is_empty(value, context) == True:
if self.is_required() == True:
self.raise_error('empty', value, context, errorclass=EmptyError)
return self.empty_value(context)
converted_value = self.convert(value, context)
self.validate(converted_value, context)
return converted_value
# --------------------------------------------------------------------------
# Defining a convenience API
def convert(self, value, context):
"""Convert the input value to a suitable Python instance which is
returned. If the input is invalid, raise an ``InvalidDataError``."""
return value
def validate(self, converted_value, context):
"""Perform additional checks on the value which was processed
successfully before (otherwise this method is not called). Raise an
InvalidDataError if the input data is invalid.
You can implement only this method in your validator if you just want to
add additional restrictions without touching the actual conversion.
This method must not modify the ``converted_value``."""
pass
# REFACT: rename to default_value()
def empty_value(self, context):
"""Return the 'empty' value for this validator (usually None)."""
if self._default is NoValueSet:
return None
return self._default
def is_empty(self, value, context):
"""Decide if the value is considered an empty value."""
return (value is None)
def is_required(self, set_explicitely=False):
if self._required == True:
return True
elif (not set_explicitely) and (self._required == NoValueSet):
return True
return False
# -------------------------------------------------------------------------
# i18n: public API
def translation_parameters(self, context):
return {'domain': 'pycerberus'}
def translate_message(self, key, native_message, translation_parameters, context):
# This method can be overridden on a by-class basis to get translations
# to support non-gettext translation mechanisms (e.g. from a db)
return GettextTranslation(**translation_parameters).ugettext(native_message)
def message(self, key, context, **values):
# This method can be overridden globally to use a different message
# lookup / translation mechanism altogether
native_message = self._implementation(key, 'message_for_key', context)(key)
translation_parameters = self._implementation(key, 'translation_parameters', context)()
translation_function = self._implementation(key, 'translate_message', context)
translated_template = translation_function(key, native_message, translation_parameters)
return translated_template % values
# -------------------------------------------------------------------------
# private
def _implementation(self, key, methodname, context):
def context_key_wrapper(*args):
method = self._implementations[key][methodname]
args = list(args) + [context]
if self._is_unbound(method):
return method(self, *args)
return method(*args)
return context_key_wrapper
def _is_unbound(self, method):
if sys.version_info < (3,):
return (method.im_self is None)
return (getattr(method, '__self__', None) is None)
def is_internal_state_frozen(self):
is_frozen = getattr(self, '_is_internal_state_frozen', NoValueSet)
if is_frozen == NoValueSet:
return None
return bool(is_frozen)
def set_internal_state_freeze(self, is_frozen):
self.__dict__['_is_internal_state_frozen'] = is_frozen
def __setattr__(self, name, value):
"Prevent non-threadsafe use of Validators by unexperienced developers"
if self.is_internal_state_frozen():
raise ThreadSafetyError('Do not store state in a validator instance as this violates thread safety.')
self.__dict__[name] = value
# -------------------------------------------------------------------------
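
# --- Hedged usage sketch (added for illustration; not part of pycerberus itself) ---
# A minimal validator following the convert()/validate() split described above.
# The class name, message keys and message texts are made up for this example.
class PositiveIntValidator(Validator):
    def messages(self):
        return {'invalid_number': _('Value must be a number.'),
                'negative': _('Value must not be negative.')}

    def convert(self, value, context):
        try:
            return int(value)
        except (TypeError, ValueError):
            self.raise_error('invalid_number', value, context)

    def validate(self, converted_value, context):
        if converted_value < 0:
            self.raise_error('negative', converted_value, context)

# Expected behaviour: PositiveIntValidator().process('42') returns 42, while
# process('abc') and process('-1') raise InvalidDataError.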
|
gpatonay/popy
|
pycerberus/api.py
|
Python
|
mit
| 18,239
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0003_organization_color'),
]
operations = [
migrations.AddField(
model_name='location',
name='color',
field=models.CharField(blank=True, max_length=6, verbose_name='Color', validators=[django.core.validators.RegexValidator(regex=b'^[0-9a-zA-Z]{6}$', message='Enter a valid hexadecimal color')]),
preserve_default=True,
),
]
|
Inter-Actief/alexia
|
alexia/apps/organization/migrations/0004_location_color.py
|
Python
|
bsd-3-clause
| 626
|
#!/usr/bin/env python
# Standard library imports
import argparse
import json
import logging
import logging.handlers
import threading
import time
# Additional library imports
import bottle
import RPIO
# The state history is stored in a flat file.
HISTORY_FILENAME = 'history.log'
# These are logical pin numbers based on the Broadcom
# chip on the Raspberry Pi model A/B (not the plus).
TRIGGER_PIN = 17
OPEN_SENSOR = 25
CLOSED_SENSOR = 24
VIBRATION_SENSOR = 23
# Parse the command line parameters.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='enable debug logging')
parser.add_argument('-l', '--log', help='path and file name for log files (will use console if not specified)')
args = parser.parse_args()
# Configure the logging module.
logformat = '%(asctime)s : %(levelname)s : %(name)s : %(message)s'
loglevel = logging.DEBUG if args.debug else logging.INFO
if args.log:
loghandler = logging.handlers.RotatingFileHandler(args.log, maxBytes=16*1024*1024, backupCount=16)
logging.basicConfig(handlers=[loghandler], format=logformat, level=loglevel)
else:
logging.basicConfig(format=logformat, level=loglevel)
# Remember the most recent state so the history file doesn't
# require parsing whenever state transitions take place.
currstate = {'name': None, 'time': 0.0}
# If no vibration events occur within this time
# threshold, assume the door has stopped moving.
VIBRATION_TIMEOUT = 3.0
# Stores the timeout object that disables vibration.
vibrationtimer = None
def readhistory(num=0):
"""
    Get state history items from the history file.
@param num: The number of items to read
@return: List of state items
"""
try:
with open(HISTORY_FILENAME, 'r') as historyfile:
history = [json.loads(line) for line in historyfile]
return history[-num:]
    except (IOError, OSError):
return []
def writehistory(state):
"""
Write a state item to the history file.
@param state: The state object to write
"""
with open(HISTORY_FILENAME, 'a') as historyfile:
historyfile.write(json.dumps(state) + '\n')
def updatestate(name):
"""
Update the current state.
@param name: The name of the new state
"""
    global currstate  # update the module-level state so evaluatestate() can read it
    currstate = {'time': int(time.time() * 1000), 'name': name}
writehistory(currstate)
def evaluatestate(sensor, status):
"""
Determine the new state given a sensor event.
@param sensor: The sensor event name
@param status: The value of the sensor
"""
# The open and closed sensors authoritatively determine state
if sensor == 'open':
updatestate('open' if status else 'closing')
elif sensor == 'closed':
updatestate('closed' if status else 'opening')
# Otherwise fall back to the vibration sensor, but only override
# the current state if the door is neither open nor closed.
elif sensor == 'vibration':
if status:
if currstate.get('name') == 'half-open':
updatestate('closing')
elif currstate.get('name') == 'half-closed':
updatestate('opening')
else:
if currstate.get('name') == 'opening':
updatestate('half-open')
elif currstate.get('name') == 'closing':
updatestate('half-closed')
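
# --- Hedged worked example (added for illustration) ---
# One possible sequence seen by evaluatestate(), starting from 'closed':
#   closed sensor drops to 0        -> 'opening'
#   vibration times out mid-travel  -> 'half-open'
#   vibration starts again          -> 'closing'
#   closed sensor returns to 1      -> 'closed'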
def handleopen(id, value):
"""
Process the change of the open sensor.
@param id: The GPIO pin identifier for the sensor (ignored)
@param value: The GPIO pin value where 0 = open and 1 = closed
"""
status = value != 0
evaluatestate('open', status)
logging.info('Open sensor changed to ' + str(status))
def handleclosed(id, value):
"""
Process the change of the closed sensor.
@param id: The GPIO pin identifier for the sensor (ignored)
@param value: The GPIO pin value where 0 = open and 1 = closed
"""
status = value != 0
evaluatestate('closed', status)
logging.info('Closed sensor changed to ' + str(status))
def setvibration(status=False):
"""
Set the vibration value.
@param status: The new vibration status
"""
evaluatestate('vibration', status)
logging.info('Vibration sensor changed to ' + str(status))
def handlevibration(id, value):
"""
Process a vibration sensor event by cancelling and rescheduling
the disable event, and then setting vibration to true.
@param id: The GPIO pin identifier for the sensor (ignored)
@param value: The GPIO pin value (ignored)
"""
global vibrationtimer
try:
vibrationtimer.cancel()
except (AttributeError, RuntimeError):
pass
vibrationtimer = threading.Timer(VIBRATION_TIMEOUT, setvibration)
vibrationtimer.start()
setvibration(True)
def handletrigger():
"""Figuratively 'press the door button' by briefly closing the relay."""
RPIO.output(TRIGGER_PIN, False)
time.sleep(0.2)
RPIO.output(TRIGGER_PIN, True)
logging.info('Trigger occurred')
@bottle.post('/_trigger')
def posttrigger():
"""
Trigger the garage door.
@return: 204 NO CONTENT if the door is triggered successfully
500 INTERNAL SERVER ERROR with an {error.json} document if an unknown problem occurs
"""
bottle.response.status = 204
handletrigger()
@bottle.get('/history')
def gethistory():
"""
Get a list of states that represent door history.
@return: 200 OK with a {history.json} document if the data is fetched successfully
500 INTERNAL SERVER ERROR with an {error.json} document if an unknown problem occurs
"""
num = int(bottle.request.query.get('n') or 0)
return {'history': readhistory(num)}
@bottle.get('/')
@bottle.get('/<filename:path>')
def getfile(filename='index.html'):
"""
Serve static content to clients, setting the cache value to one year.
@param filename: The full path to the content being requested
@return: 200 OK with the content if retrieved successfully
404 NOT FOUND with an {error.json} document if the content is not available
500 INTERNAL SERVER ERROR with an {error.json} document if an unknown problem occurs
"""
response = bottle.static_file(filename, root='www')
response.set_header('Cache-Control', 'max-age=31557600')
return response
@bottle.error(400)
@bottle.error(404)
@bottle.error(422)
@bottle.error(500)
@bottle.error(504)
def _error(error):
"""
Return an error message to clients.
@param error: The error message from an above function
@return: The appropriate error code with the {error.json} document
"""
bottle.response.set_header('Cache-Control', 'no-cache')
bottle.response.set_header('Content-Type', 'application/json')
return json.dumps({'details': error.body})
def setupgpio():
"""Perform all necessary GPIO initialization."""
# Use logical numbering because that's the only
# thing compatible with the interrupt callbacks.
RPIO.setmode(RPIO.BCM)
# Set up the trigger output pin and ensure it is set to True, i.e. relay open.
RPIO.setup(TRIGGER_PIN, RPIO.OUT)
RPIO.output(TRIGGER_PIN, True)
# Set up callbacks that fire when sensor state changes. Note there's
# no debounce value on the vibration pin, because using one would
# suppress the very small changes the code is trying to detect.
RPIO.add_interrupt_callback(OPEN_SENSOR, handleopen, debounce_timeout_ms=100)
RPIO.add_interrupt_callback(CLOSED_SENSOR, handleclosed, debounce_timeout_ms=100)
RPIO.add_interrupt_callback(VIBRATION_SENSOR, handlevibration)
# An additional setup call is required to ensure the pullup state
# is set properly. Since the sensors are wired as normally closed
# this allows the "switch closed" state to read as true.
RPIO.setup(OPEN_SENSOR, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(CLOSED_SENSOR, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(VIBRATION_SENSOR, RPIO.IN, pull_up_down=RPIO.PUD_UP)
# Start the thread that watches for events and calls the interrupt handlers.
RPIO.wait_for_interrupts(threaded=True)
def cleanupgpio():
"""Release all GPIO resources."""
RPIO.cleanup_interrupts()
RPIO.cleanup()
def runwebserver():
"""Set up and run the webserver. This routine does not return until process termination."""
logging.getLogger('waitress').setLevel(loglevel)
bottle.run(server='waitress', host='0.0.0.0', port=80, quiet=(not args.debug), debug=args.debug)
# Start things up if running as the main module. Be sure to tidy up when done.
if __name__ == '__main__':
try:
setupgpio()
runwebserver()
finally:
cleanupgpio()
|
lordjabez/garage-envoy
|
garage-envoy.py
|
Python
|
mit
| 8,718
|
# Write a program that prints the name under which Mikhail Nikolaevich Rumyantsev was known.
# Additionally, print the person's area of interest, place of birth,
# years of birth and death (if the person has died), and compute the age at the present
# moment (or at the moment of death).
# All required data must be stored in variables.
# After printing the information, the program must wait for the user to press Enter before exiting.
# Kodzokov M.M., 25.05.2016
name="Михаил Николаевич Румянцев"
hobby="кино"
place_birth="Санкт-Петербург"
year_birth=1901
year_death=1983
age=2016-1901
print(name+"- cоветский артист цирка, актёр. Герой Социалистического Труда. Народный артист СССР")
print("Место рождения: "+place_birth)
print("Годы жизни:",year_birth,"-",year_death)
print("Возраст:",age)
print("Область деятельности: "+hobby)
input("Нажмите ENTER для продолжения")
|
Mariaanisimova/pythonintask
|
INBa/2015/KODZOKOV_M_M/task_4_9.py
|
Python
|
apache-2.0
| 1,454
|
# from koala.protocol import Koala
# from protocol import Koala
import sys, math, random
from routing_table import RoutingTable, NeighborEntry
from message import Message
from protocol import Koala
from util import Util
class Node(object):
DC_SIZE = 100
WORLD_SIZE = 100
MAGIC = 2
MAX_VAL = 999999
A = B = C = D = 1
A = 1 / float(WORLD_SIZE)
# B = 99999
"""docstring for Node"""
def __init__(self, nid, dc_id):
super(Node, self).__init__()
self.node_id = nid
self.dc_id = dc_id
self.id = "%s-%s" % (dc_id, nid)
self.rt = RoutingTable()
self.latency_x_dc = {}
@staticmethod
def from_dict(json_node):
node = Node(json_node['node_id'], json_node['dc_id'])
node.latency_x_dc = json_node.get('latency_x_dc', {})
node.rt = RoutingTable.from_dict(json_node['rt'])
return node
def __str__(self):
return "(id: %s)" % self.id
def join(self, boot_id):
if not Koala.add_node(self):
return
if boot_id and len(boot_id):
self.try_set_neighbour(boot_id, 0, 0)
self.send(boot_id, Message('rt', {'c': True, 'rt': self.rt}))
def send(self, dest_id, msg):
# print '%s talked to %s' % (self.id, dest_id)
return Koala.send_to(self.id, dest_id, msg)
def on_receive(self, source, msg):
# self.update_links(source, msg)
# self.search_long_links_in_path(msg)
if msg.type == 'route':
return self.on_route(source, msg)
if msg.type == 'rt':
self.update_rt(source, msg)
if msg.type == 'ngn':
self.on_new_global_neighbours(source, msg)
return None
def update_rt(self, source, msg):
rt = msg.content['rt']
chain = msg.content['c']
source_old_neig= msg.content.get('old_neighbors')
# then update my rt with the nodes i received
new_neighbours = []
rec_neighbours = rt.get_all_neighbours()
if source_old_neig:
rec_neighbours.extend(source_old_neig)
rec_neighbours.append(source) # maybe source could be a potential neighbour
neigh_before = self.rt.get_neighbours_ids()
dc_before = [Node.dc_id(nb) for nb in neigh_before]
dc_before.append(self.dc_id)
source_joining = source.is_joining()
self_joining = self.is_joining()
old_neighbors = []
for rec_neighbour in rec_neighbours:
is_source = rec_neighbour.id == source.id
if rec_neighbour.id != self.id:
if self_joining and self.is_local(source.id):
dc_before.append(Node.dc_id(rec_neighbour.id))
l = msg.latency if is_source else rec_neighbour.latency
lq = self.get_lq(is_source, source.id, rec_neighbour)
res, oldies = self.try_set_neighbour(rec_neighbour.id, l, lq)
old_neighbors.extend(oldies)
self.update_latency_x_dc(rec_neighbour.id, l, lq)
if res == 2 or (res == 1 and is_source and source_joining):
new_neighbours.append(NeighborEntry(rec_neighbour.id, l))
elif res < 0 and rec_neighbour.id == source.id:
dest = self.route_to(source.id, msg)
msg.referrer = self
msg.content['c'] = True
source.send(dest, msg)
dc_before = list(set(dc_before))
self.update_latencies()
        # some neighbours might have been overwritten; we send only to the new neighbours
neigh_after = self.rt.get_neighbours_ids()
for new_n in new_neighbours:
if new_n.id in neigh_after and new_n.id not in neigh_before or new_n.id == source.id:
if self.is_local(new_n.id):
self.send(new_n.id, Message('rt', {'c': True, 'rt': self.rt, 'old_neighbors': old_neighbors}))
else:
new_dc = Node.dc_id(new_n.id) not in dc_before
if new_dc and not self_joining:
self.broadcast_global_neighbor(new_n)
if not self.is_local(source.id) and (chain or new_dc):
self.send(new_n.id, Message('rt', {'c': False, 'rt': self.rt, 'old_neighbors':old_neighbors}))
def update_latency_x_dc(self, id, l, lq):
# if Node.dc_id(id) == self.
if lq > 1:
self.latency_x_dc[Node.dc_id(id)] = l
# here we can update the long links and, if we care enough, the visited as well
links = self.rt.get_all_neighbours(RoutingTable.GLOBALS)
for ln in links:
if ln and ln.id == id and lq >= ln.lq:
ln.latency = l
ln.lq = lq
def update_latencies(self):
ln = [self.rt.globals.predecessor, self.rt.globals.successor]
ln.extend(self.rt.globals.visited)
ln.extend(self.rt.globals.longlinks)
for n in ln:
if n and n.lq < 2 and Node.dc_id(n.id) in self.latency_x_dc.keys():
n.lq = 2
n.latency = self.latency_x_dc[Node.dc_id(n.id)]
def get_lq(self, is_source, source_id, ne):
if is_source:
return 3
if self.is_local(source_id) and ne.lq > 1:
return 2
return 1
def broadcast_global_neighbor(self, gn):
print 'broadcast dc %s in dc %s' % (Node.dc_id(gn.id), self.dc_id)
candidates = self.create_random_ids(Node.MAGIC)
ln = [self.rt.locals.predecessor, self.rt.locals.successor]
cnt = {'candidates': candidates, 'gn': gn}
msg = Message('ngn', cnt)
for n in ln:
if n:
self.send(n.id, msg)
def on_new_global_neighbours(self, source, msg):
cands = msg.content['candidates']
gn = msg.content['gn']
respees = []
for c in cands:
if self.is_responsible(c):
if len(respees) == 0:
self.send(gn.id, Message('rt', {'c': False, 'rt': self.rt}))
respees.append(c)
rnes, _ = self.try_set_neighbour(gn.id, gn.latency, 2)
if rnes != 2:
return
new_cands = list(set(cands)-set(respees))
add_cands = self.create_random_ids(len(respees) - 1)
new_cands.extend(add_cands)
msg.content['candidates'] = new_cands
target = self.rt.locals.predecessor.id
if source.id == self.rt.locals.predecessor.id:
target = self.rt.locals.successor.id
self.send(target, msg)
def is_responsible(self, id):
if not self.rt.locals.successor:
return False
return Node.distance(self.id, id) < Node.distance(self.rt.locals.successor.id, id) \
and Node.distance(self.id, id) < Node.distance(self.rt.locals.predecessor.id, id)
def create_random_ids(self, nr):
if nr <= 0:
return []
rids =[]
while len(rids) != nr:
rand_id = '%s-%s' % (self.dc_id, random.randint(0, Node.DC_SIZE))
if not self.is_responsible(rand_id):
rids.append(rand_id)
rids = list(set(rids))
return rids
def on_route(self, source, msg):
nid = msg.content
self.update_latency_x_dc(source.id, msg.latency, 3)
self.update_latencies()
if nid != self.id:
dest = self.route_to(nid, msg)
return self.send(dest, msg)
else:
return msg.path
def is_joining(self):
neighs = self.rt.get_all_neighbours()
return len(neighs) == 1 and neighs[0].latency == 0
# return len(self.rt.get_all_neighbours() .get_neighbours_ids()) == 1
def alone_in_dc(self):
return self.rt.locals.successor == None
def try_set_neighbour(self, nid, latency, lq):
local = self.is_local(nid)
added_s = added_p = -1
old_s = old_p = None
ne = NeighborEntry(nid, latency, lq)
if self.is_successor(nid):
added_s, old_s = self.rt.locals.set_successor(ne) if local else self.rt.globals.set_successor(ne)
if self.is_predecessor(nid):
added_p, old_p = self.rt.locals.set_predecessor(ne) if local else self.rt.globals.set_predecessor(ne)
ret = max(added_s, added_p)
if ret == 1:
ret += 1
        if ret == -1 and (self.can_be_predecessor(nid) or self.can_be_successor(nid)):
            ret = 1
        old_neighs = [n for n in (old_s, old_p) if n is not None]
        # 2: added, 1: potential neighbor, 0: updated, -1: not a neighbor
return ret, old_neighs
# def route_to(self, nid, msg):
# local = self.is_local(nid)
#
# succ = self.rt.locals.successor.id if local else self.rt.globals.successor.id
# pred = self.rt.locals.predecessor.id if local else self.rt.globals.predecessor.id
#
# d_from_succ = Node.distance(nid, succ)
# d_from_pred = Node.distance(nid, pred)
# if d_from_pred < d_from_succ:
# return pred
# else:
# return succ
    def route_to(self, dest, msg):
        best = 0
        ret = None
        rt = self.rt.get_all_neighbours(RoutingTable.ALL)
        for re in rt:
            v = self.get_route_value(dest, re)
            if v > best:
                best = v
                ret = re
        return ret.id
def get_route_value(self, dest, re):
res = 0
if Node.distance(self.id, dest) < Node.distance(re.id, dest): #current node is better than sending it to this entry
res = -1
if self.dc_id == Node.dc_id(re.id): # the entry is local
res = Node.A * Node.distance(self.id, re.id) # prefer long local links, they potentially know things I don't know
if Node.distance(self.id, dest) > Node.distance(re.id, dest):
tot_distance = Node.distance(self.id, dest)
distance = Node.distance(self.id, re.id) / float(tot_distance)
norm_latency = Util.normalize_latency(tot_distance, re.latency)
res = 1 + Node.B * distance + Node.C * norm_latency
if Node.dc_id(dest) == Node.dc_id(re.id):
res = Node.MAX_VAL - Node.A * Node.distance(re.id, dest)
return res
# def get_rt_entry(self, node_id):
# merged = self.rt.get_all_neighbours()
# for e in merged:
# if e.id == node_id:
# return e
# return None
def to_dict(self):
return {
'node_id': self.node_id, 'dc_id': self.dc_id, 'id': self.id, 'rt': self.rt.to_dict(), 'latency_x_dc': self.latency_x_dc
}
def is_local(self, nid):
ndc = Node.dc_id(nid)
return self.dc_id == ndc
def is_neighbour(self, nid):
if self.is_successor(nid):
return True
if self.is_predecessor(nid):
return True
return False
def is_successor(self, nid):
local = self.is_local(nid)
successor = self.rt.locals.successor if local else self.rt.globals.successor
predecessor = self.rt.locals.predecessor if local else self.rt.globals.predecessor
if self.can_be_successor(nid):
if not successor or local or Node.compare_global(nid, successor.id) != 0:
return True
if predecessor.id == successor.id:
return True
if nid != predecessor.id and Node.distance(self.id, nid, True) <= Node.distance(self.id, successor.id, True):
return True
return False
def is_predecessor(self, nid):
local = self.is_local(nid)
predecessor = self.rt.locals.predecessor if local else self.rt.globals.predecessor
successor = self.rt.locals.successor if local else self.rt.globals.successor
if self.can_be_predecessor(nid):
if not predecessor or local or Node.compare_global(nid, predecessor.id) != 0:
return True
if predecessor.id == successor.id:
return True
if nid != successor.id and Node.distance(self.id, nid, True) <= Node.distance(self.id, predecessor.id, True):
return True
return False
def can_be_neighbour(self, nid):
if self.can_be_successor(nid):
return True
if self.can_be_predecessor(nid):
return True
return False
def can_be_successor(self, node_id):
local = self.is_local(node_id)
successor = self.rt.locals.successor if local else self.rt.globals.successor
cpr_fnct = Node.compare_local if local else Node.compare_global
if not successor:
return True
else:
# at the moment we keep only one successor
if (cpr_fnct(node_id, successor.id) <= 0 and cpr_fnct(successor.id, self.id) < 0) or \
(cpr_fnct(node_id, successor.id) >= 0 and cpr_fnct(successor.id, self.id) < 0 and cpr_fnct(node_id, self.id) > 0) or \
(cpr_fnct(node_id, successor.id) <= 0 and cpr_fnct(node_id, self.id) > 0) :
return True
return False
def can_be_predecessor(self, node_id):
local = self.is_local(node_id)
predecessor = self.rt.locals.predecessor if local else self.rt.globals.predecessor
cpr_fnct = Node.compare_local if local else Node.compare_global
if not predecessor:
return True
else:
if (cpr_fnct(node_id, predecessor.id) >= 0 and cpr_fnct(predecessor.id, self.id) > 0) or \
(cpr_fnct(node_id, predecessor.id) <= 0 and cpr_fnct(predecessor.id, self.id) > 0 and cpr_fnct(node_id, self.id) < 0) or \
(cpr_fnct(node_id, predecessor.id) >= 0 and cpr_fnct(node_id, self.id) < 0):
return True
return False
@staticmethod
def compare(id1, id2):
splt1 = id1.split('-')
splt2 = id2.split('-')
dc_id1 = splt1[0]
dc_id2 = splt2[0]
if dc_id1 != dc_id2:
ret = int(dc_id1 > dc_id2)
ret = ret if ret > 0 else -1
return ret
n_id1 = int(splt1[1])
n_id2 = int(splt2[1])
if n_id1 == n_id2:
return 0
ret = int(n_id1 > n_id2)
ret = ret if ret > 0 else -1
return ret
@staticmethod
def compare_local(id1, id2):
splt1 = id1.split('-')
splt2 = id2.split('-')
n_id1 = int(splt1[1])
n_id2 = int(splt2[1])
if n_id1 == n_id2:
return 0
ret = int(n_id1 > n_id2)
ret = ret if ret > 0 else -1
return ret
@staticmethod
def compare_global(id1, id2):
splt1 = id1.split('-')
splt2 = id2.split('-')
dc_id1 = splt1[0]
dc_id2 = splt2[0]
if dc_id1 != dc_id2:
ret = int(dc_id1 > dc_id2)
ret = ret if ret > 0 else -1
return ret
return 0
@staticmethod
def dc_id(nid):
return nid.split('-')[0]
@staticmethod
def n_id(nid):
return int(nid.split('-')[1])
@staticmethod
def distance(src_id, target_id, forcelocal=False):
local = False
if Node.dc_id(src_id) == Node.dc_id(target_id) or forcelocal:
local = True
src_id = Node.n_id(src_id) if local else Node.dc_id(src_id)
target_id = Node.n_id(target_id) if local else Node.dc_id(target_id)
a = src_id if local else ord(src_id)
b = target_id if local else ord(target_id)
if src_id > target_id:
a = target_id if local else ord(target_id)
b = src_id if local else ord(src_id)
size = Node.DC_SIZE if local else Node.WORLD_SIZE
d1 = b - a
d2 = (size - b + a) % size
return min(d1, d2)
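# Hypothetical sanity check (not part of the original file): it assumes ids of
# the form "<dc>-<n>" with single-character DC identifiers, which is what the
# ord()-based global branch of Node.distance expects.
if __name__ == '__main__':
    print Node.distance('a-1', 'a-5')   # local ring distance inside DC "a" -> 4
    print Node.distance('a-1', 'b-1')   # global ring distance between DCs "a" and "b" -> 1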
|
gtato/koala
|
prototype/koala/node.py
|
Python
|
gpl-3.0
| 15,912
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The niftyreg module provides classes for interfacing with the `NiftyFit`_ command line tools.
Top-level namespace for niftyfit
"""
from .base import (Info)
from .dwi import (FitDwi, DwiTool)
from .asl import (FitAsl)
|
fprados/nipype
|
nipype/interfaces/niftyfit/__init__.py
|
Python
|
bsd-3-clause
| 336
|
class A:
x = 1
y = 'hello'
class B:
z = 'bye'
class C(A,B):
def salutation(self):
return '%d %s %s' % (self.x, self.y, self.z)
inst = C()
print inst.salutation()
inst.x = 100
print inst.salutation()
|
jjack15/CS402-Project-UTK
|
OnlinePythonTutor/v1-v2/tutorials/oop/oop_demo.py
|
Python
|
agpl-3.0
| 215
|
#!/usr/bin/python2
import sys
import csv
import os
def main(hippiefile, pairfile, out):
"""A script to take a file output by the HIPPIE script and match the pairs in it to a file containing protein pairs:
input:
hippiefile - output of HIPPIE script
pairfile - list of protein pairs
output:
out - all the pairs it could find in the HIPPIE file with their confidence values"""
# Reading in the HIPPIE file
#initialise csv reader
c = csv.reader(open(hippiefile), delimiter="\t")
#make dictionary using frozensets as keys with the confidence scores as values
hippieids = {}
for line in c:
k = frozenset([line[1],line[3]])
hippieids[k] = line[4]
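    # Note (illustrative, not part of the original script): frozenset keys make the
    # lookup order-independent, e.g. frozenset(['P1', 'P2']) == frozenset(['P2', 'P1']),
    # so an A-B pair in the pair file matches a B-A row in the HIPPIE file.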
# Reading in the file of protein pairs
#initialise csv reader
c = csv.reader(open(pairfile), delimiter="\t")
#make dictionary using frozensets as keys:
posids = {}
for line in c:
line = frozenset(line)
posids[line] = 1
# Write new file:
#then rewrite the training.positive.HIPPIE.txt file:
c = csv.writer(open(out, "w"), delimiter="\t")
for k in hippieids.keys():
try:
if posids[k]:
l = list(k)
try:
c.writerow([l[0],l[1],hippieids[k]])
                except IndexError:
                    # ignore self-interactions (a single-protein frozenset has no second element)
pass
except KeyError:
#ignore missing pairs
pass
#how many lines does the new file have?
count = int(os.popen("wc -l < " + out).read())
# Report how well it went:
print "%i of %i pairs matched."%(count,len(posids.keys()))
return None
if __name__=="__main__":
if sys.argv[1] == "-h":
print "Usage: python2 hippiematch.py hippiefile pairfile outputfile"
print "Where: "
print " hippiefile is the out of the HIPPIE script"
print " pairfile is the file of protein pairs to match"
print " outputfile is the name of the file to output to"
else:
main(*sys.argv[1:])
|
gngdb/opencast-bio
|
scripts/hippiematch.py
|
Python
|
mit
| 2,056
|
import hashlib
import zlib
###
# Returns MD5 checksum of argument data encoded in UTF-8
#
def md5Checksum(data):
"""
>>> file = open('tmp.txt', 'w')
>>> file.close()
>>> data = open('tmp.txt', 'r').read()
>>> md5Checksum(data.encode())
'd41d8cd98f00b204e9800998ecf8427e'
"""
md5 = hashlib.md5()
md5.update(data)
return md5.hexdigest()
###
# Returns SHA-256 checksum of argument data encoded in UTF-8
#
def sha256Checksum(data):
"""
>>> file = open('tmp.txt', 'w')
>>> file.close()
>>> data = open('tmp.txt', 'r').read()
>>> sha256Checksum(data.encode())
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
"""
sha = hashlib.sha256()
sha.update(data)
return sha.hexdigest()
def sha512Checksum(data):
"""
>>> file = open('tmp.txt', 'w')
>>> file.close()
>>> data = open('tmp.txt', 'r').read()
>>> sha512Checksum(data.encode())
'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e'
"""
sha = hashlib.sha512()
sha.update(data)
return sha.hexdigest()
###
# Returns CRC32 checksum of data
#
def crc32Checksum(data):
"""
>>> file = open('tmp.txt', 'w')
>>> file.close()
>>> data = open('tmp.txt', 'r').read()
>>> crc32Checksum(data.encode())
'0'
"""
return str(zlib.crc32(data) & 0xFFFFFFFF)
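# Minimal usage sketch appended for illustration (not part of the original
# module); the input string below is an arbitrary placeholder.
if __name__ == '__main__':
    sample = 'example data'.encode()
    print(md5Checksum(sample))
    print(sha256Checksum(sample))
    print(sha512Checksum(sample))
    print(crc32Checksum(sample))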
|
aturki/pyATK
|
pyATK/Misc/Crypto.py
|
Python
|
mit
| 1,498
|
import sys
import ceph_medic
import logging
from ceph_medic import runner, collector
from tambo import Transport
logger = logging.getLogger(__name__)
def as_list(string):
if not string:
return []
string = string.strip(',')
# split on commas
string = string.split(',')
# strip spaces
return [x.strip() for x in string]
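# Illustrative example (not part of the original module): as_list('E1, W2,')
# returns ['E1', 'W2'] -- the trailing comma and surrounding spaces are stripped.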
class Check(object):
help = "Run checks for all the configured nodes in a cluster or hosts file"
long_help = """
check: Run for all the configured nodes in the configuration
Options:
--ignore Comma-separated list of errors and warnings to ignore.
Loaded Config Path: {config_path}
Configured Nodes:
{configured_nodes}
"""
def __init__(self, argv=None, parse=True):
self.argv = argv or sys.argv
@property
def subcommand_args(self):
# find where `check` is
index = self.argv.index('check')
# slice the args
return self.argv[index:]
def _help(self):
node_section = []
for daemon, node in ceph_medic.config.nodes.items():
header = "\n* %s:\n" % daemon
body = '\n'.join([" %s" % n for n in ceph_medic.config.nodes[daemon]])
node_section.append(header+body+'\n')
return self.long_help.format(
configured_nodes=''.join(node_section),
config_path=ceph_medic.config.config_path
)
def main(self):
options = ['--ignore']
config_ignores = ceph_medic.config.file.get_list('check', '--ignore')
parser = Transport(
self.argv, options=options,
check_version=False
)
parser.catch_help = self._help()
parser.parse_args()
ignored_codes = as_list(parser.get('--ignore', ''))
# fallback to the configuration if nothing is defined in the CLI
if not ignored_codes:
ignored_codes = config_ignores
if len(self.argv) < 1:
return parser.print_help()
# populate the nodes metadata with the configured nodes
for daemon in ceph_medic.config.nodes.keys():
ceph_medic.metadata['nodes'][daemon] = []
for daemon, nodes in ceph_medic.config.nodes.items():
for node in nodes:
node_metadata = {'host': node['host']}
if 'container' in node:
node_metadata['container'] = node['container']
ceph_medic.metadata['nodes'][daemon].append(node_metadata)
collector.collect()
test = runner.Runner()
test.ignore = ignored_codes
results = test.run()
runner.report(results)
#XXX might want to make this configurable to not bark on warnings for
# example, setting forcefully for now, but the results object doesn't
# make a distinction between error and warning (!)
if results.errors or results.warnings:
sys.exit(1)
|
alfredodeza/ceph-doctor
|
ceph_medic/check.py
|
Python
|
mit
| 2,944
|
#!/usr/bin/env python
from PIL import Image
import sys
sys.path.insert(0, r'../python/')
import encode
lut = [[0.8487,0.84751182,0.84479598,0.840213,0.83359314,0.8257851,0.814752,0.80006949,0.78216192,0.76060494,0.73658673,0.7086645,0.67777182,0.64475739,0.60987582,0.57134484,0.52729731,0.48562614,0.45167814],[0,0.0838426,0.1676852,0.2515278,0.3353704,0.419213,0.5030556,0.5868982,0.67182264,0.75336633,0.83518048,0.91537187,0.99339958,1.06872269,1.14066505,1.20841528,1.27035062,1.31998003,1.3523]]
print len(lut[0]),len(lut)
img = Image.new('RGBA', (len(lut[0]), len(lut)), (0,0,0,0))
pixels = img.load()
for y in range(len(lut)):
for x in range(len(lut[0])):
        pixels[x,y] = encode.toRGBA(lut[y][x],'number')
img.save(open('lut.png', 'wb'))
|
tangrams/data2image
|
example/lut.py
|
Python
|
mit
| 817
|
"""
Test for JsonResponse and JsonResponseBadRequest util classes.
"""
import json
import unittest
import mock
from django.http import HttpResponse, HttpResponseBadRequest
from util.json_request import JsonResponse, JsonResponseBadRequest
class JsonResponseTestCase(unittest.TestCase):
"""
A set of tests to make sure that JsonResponse Class works correctly.
"""
def test_empty(self):
resp = JsonResponse()
self.assertIsInstance(resp, HttpResponse)
self.assertEqual(resp.content.decode('utf-8'), "")
self.assertEqual(resp.status_code, 204)
self.assertEqual(resp["content-type"], "application/json")
def test_empty_string(self):
resp = JsonResponse("")
self.assertIsInstance(resp, HttpResponse)
self.assertEqual(resp.content.decode('utf-8'), "")
self.assertEqual(resp.status_code, 204)
self.assertEqual(resp["content-type"], "application/json")
def test_string(self):
resp = JsonResponse("foo")
self.assertEqual(resp.content.decode('utf-8'), '"foo"')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp["content-type"], "application/json")
def test_dict(self):
obj = {"foo": "bar"}
resp = JsonResponse(obj)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp["content-type"], "application/json")
def test_set_status_kwarg(self):
obj = {"error": "resource not found"}
resp = JsonResponse(obj, status=404)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp["content-type"], "application/json")
def test_set_status_arg(self):
obj = {"error": "resource not found"}
resp = JsonResponse(obj, 404)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp["content-type"], "application/json")
def test_encoder(self):
obj = [1, 2, 3]
encoder = object()
with mock.patch.object(json, "dumps", return_value="[1,2,3]") as dumps:
resp = JsonResponse(obj, encoder=encoder)
self.assertEqual(resp.status_code, 200)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
kwargs = dumps.call_args[1]
self.assertIs(kwargs["cls"], encoder)
class JsonResponseBadRequestTestCase(unittest.TestCase):
"""
A set of tests to make sure that the JsonResponseBadRequest wrapper class
works as intended.
"""
def test_empty(self):
resp = JsonResponseBadRequest()
self.assertIsInstance(resp, HttpResponseBadRequest)
self.assertEqual(resp.content.decode("utf-8"), "")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp["content-type"], "application/json")
def test_empty_string(self):
resp = JsonResponseBadRequest("")
self.assertIsInstance(resp, HttpResponse)
self.assertEqual(resp.content.decode('utf-8'), "")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp["content-type"], "application/json")
def test_dict(self):
obj = {"foo": "bar"}
resp = JsonResponseBadRequest(obj)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp["content-type"], "application/json")
def test_set_status_kwarg(self):
obj = {"error": "resource not found"}
resp = JsonResponseBadRequest(obj, status=404)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp["content-type"], "application/json")
def test_set_status_arg(self):
obj = {"error": "resource not found"}
resp = JsonResponseBadRequest(obj, 404)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp["content-type"], "application/json")
def test_encoder(self):
obj = [1, 2, 3]
encoder = object()
with mock.patch.object(json, "dumps", return_value="[1,2,3]") as dumps:
resp = JsonResponseBadRequest(obj, encoder=encoder)
self.assertEqual(resp.status_code, 400)
compare = json.loads(resp.content.decode('utf-8'))
self.assertEqual(obj, compare)
kwargs = dumps.call_args[1]
self.assertIs(kwargs["cls"], encoder)
|
cpennington/edx-platform
|
common/djangoapps/util/tests/test_json_request.py
|
Python
|
agpl-3.0
| 4,846
|
#!/usr/bin/env python
'''
Pymodbus Asynchronous Client Examples
--------------------------------------------------------------------------
The following is an example of how to use the asynchronous modbus
client implementation from pymodbus.
'''
#---------------------------------------------------------------------------#
# import needed libraries
#---------------------------------------------------------------------------#
from twisted.internet import reactor, protocol
from pymodbus.constants import Defaults
#---------------------------------------------------------------------------#
# choose the requested modbus protocol
#---------------------------------------------------------------------------#
from pymodbus.client.async import ModbusClientProtocol
#from pymodbus.client.async import ModbusUdpClientProtocol
#---------------------------------------------------------------------------#
# configure the client logging
#---------------------------------------------------------------------------#
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
#---------------------------------------------------------------------------#
# helper method to test deferred callbacks
#---------------------------------------------------------------------------#
def dassert(deferred, callback):
def _assertor(value): assert(value)
deferred.addCallback(lambda r: _assertor(callback(r)))
deferred.addErrback(lambda _: _assertor(False))
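# For example (taken from the calls below): dassert(client.read_coils(1, 1),
# lambda r: r.function_code < 0x80) asserts on the deferred's result once the
# read completes, instead of blocking for a return value.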
#---------------------------------------------------------------------------#
# specify slave to query
#---------------------------------------------------------------------------#
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
#---------------------------------------------------------------------------#
def exampleRequests(client):
rr = client.read_coils(1, 1, unit=0x02)
#---------------------------------------------------------------------------#
# example requests
#---------------------------------------------------------------------------#
# simply call the methods that you would like to use. An example session
# is displayed below along with some assert checks. Note that unlike the
# synchronous version of the client, the asynchronous version returns
# deferreds which can be thought of as a handle to the callback to send
# the result of the operation. We are handling the result using the
# deferred assert helper(dassert).
#---------------------------------------------------------------------------#
def beginAsynchronousTest(client):
rq = client.write_coil(1, True)
rr = client.read_coils(1,1)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.bits[0] == True) # test the expected value
rq = client.write_coils(1, [True]*8)
rr = client.read_coils(1,8)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.bits == [True]*8) # test the expected value
rq = client.write_coils(1, [False]*8)
rr = client.read_discrete_inputs(1,8)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.bits == [True]*8) # test the expected value
rq = client.write_register(1, 10)
rr = client.read_holding_registers(1,1)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.registers[0] == 10) # test the expected value
rq = client.write_registers(1, [10]*8)
rr = client.read_input_registers(1,8)
dassert(rq, lambda r: r.function_code < 0x80) # test that we are not an error
dassert(rr, lambda r: r.registers == [17]*8) # test the expected value
arguments = {
'read_address': 1,
'read_count': 8,
'write_address': 1,
'write_registers': [20]*8,
}
rq = client.readwrite_registers(**arguments)
rr = client.read_input_registers(1,8)
dassert(rq, lambda r: r.registers == [20]*8) # test the expected value
dassert(rr, lambda r: r.registers == [17]*8) # test the expected value
#-----------------------------------------------------------------------#
# close the client at some time later
#-----------------------------------------------------------------------#
reactor.callLater(1, client.transport.loseConnection)
reactor.callLater(2, reactor.stop)
#---------------------------------------------------------------------------#
# extra requests
#---------------------------------------------------------------------------#
# If you are performing a request that is not available in the client
# mixin, you have to perform the request like this instead::
#
# from pymodbus.diag_message import ClearCountersRequest
# from pymodbus.diag_message import ClearCountersResponse
#
# request = ClearCountersRequest()
# response = client.execute(request)
# if isinstance(response, ClearCountersResponse):
# ... do something with the response
#
#---------------------------------------------------------------------------#
#---------------------------------------------------------------------------#
# choose the client you want
#---------------------------------------------------------------------------#
# make sure to start an implementation to hit against. For this
# you can use an existing device, the reference implementation in the tools
# directory, or start a pymodbus server.
#---------------------------------------------------------------------------#
defer = protocol.ClientCreator(reactor, ModbusClientProtocol
).connectTCP("localhost", Defaults.Port)
defer.addCallback(beginAsynchronousTest)
reactor.run()
|
mjfarmer/scada_py
|
pymodbus/examples/common/asynchronous-client.py
|
Python
|
gpl-3.0
| 5,916
|
import time
class Challenge:
MAX_TAGS = 5
def __init__(self, ctf_channel_id, channel_id, name, category):
"""
An object representation of an ongoing challenge.
ctf_channel_id : The slack id for the associated parent ctf channel
channel_id : The slack id for the associated channel
name : The name of the challenge
category : The category of the challenge
"""
self.channel_id = channel_id
self.ctf_channel_id = ctf_channel_id
self.name = name
self.category = category
self.players = {}
self.is_solved = False
self.solver = None
self.solve_date = 0
self.tags = []
def mark_as_solved(self, solver_list, solve_date=None):
"""
Mark a challenge as solved.
solver_list : List of usernames, that solved the challenge.
solve_date : Time of solve (epoch) (None: current time / value: set to specified value).
"""
self.is_solved = True
self.solver = solver_list
self.solve_date = solve_date or int(time.time())
def unmark_as_solved(self):
"""
Unmark a challenge as solved.
"""
self.is_solved = False
self.solver = None
def add_tag(self, tag):
"""
Update the list of tags for this challenge by adding the given tag.
Return True if a modification was made, False otherwise.
"""
dirty = False
if tag not in self.tags and len(self.tags) < self.MAX_TAGS:
# The tag doesn't exist and there's room to add it, let's do so
self.tags.append(tag)
dirty = True
return dirty
def remove_tag(self, tag):
"""
Update the list of tags for this challenge by removing the given tag.
Return True if a modification was made, False otherwise.
"""
dirty = False
if tag in self.tags:
# The tag exists, let's remove it
self.tags.remove(tag)
dirty = True
return dirty
def add_player(self, player):
"""
Add a player to the list of working players.
"""
self.players[player.user_id] = player
def remove_player(self, user_id):
"""
Remove a player from the list of working players using a given slack
user ID.
"""
try:
del self.players[user_id]
except KeyError:
# TODO: Should we allow this to percolate up to the caller?
pass
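# Minimal usage sketch (not part of the original module); the channel ids,
# challenge name, tag, and solver below are hypothetical placeholders.
if __name__ == '__main__':
    chal = Challenge('C0FF33', 'C0DE42', 'warmup-rev', 'reversing')
    chal.add_tag('easy')                 # returns True: the tag was added
    chal.mark_as_solved(['some_user'])   # records the solver list and solve time
    print(chal.is_solved, chal.solver, chal.tags)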
|
OpenToAllCTF/OTA-Challenge-Bot
|
bottypes/challenge.py
|
Python
|
mit
| 2,557
|
from rest_framework import serializers
from drf_haystack.serializers import HaystackSerializerMixin
from .models import {{ cookiecutter.model_name }}
from .search_indexes import {{ cookiecutter.model_name }}Index
class {{ cookiecutter.model_name }}Serializer(serializers.ModelSerializer):
class Meta:
model = {{ cookiecutter.model_name }}
fields = '__all__'
class {{ cookiecutter.model_name }}SearchSerializer(HaystackSerializerMixin, {{ cookiecutter.model_name }}Serializer):
groupby_key = serializers.SerializerMethodField()
def get_groupby_key(self, obj):
return obj._meta.verbose_name_plural.title()
class Meta({{ cookiecutter.model_name }}Serializer.Meta):
index_classes = [{{ cookiecutter.model_name }}Index]
|
rickydunlop/cookiecutter-django-app-template-drf-haystack
|
{{cookiecutter.app_name}}/serializers.py
|
Python
|
mit
| 771
|
from __future__ import absolute_import
# The celery app must be loaded here to make the @shared_task decorator work.
from .celery import app as celery_app
|
rmyers/dtrove-ui
|
raxui/__init__.py
|
Python
|
mit
| 156
|
########################################################################
#
# File Name: HTMLDocument.py
#
#
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from pyxml.dom import Node
from pyxml.dom import NotSupportedErr
from pyxml.dom.Document import Document
from pyxml.dom import implementation
from pyxml.dom import ext
import string, sys
from pyxml.dom.html import HTML_DTD
class HTMLDocument(Document):
def __init__(self):
Document.__init__(self, None)
# These only make sense in a browser environment, therefore
# they never change
self.__dict__['__referrer'] = ''
self.__dict__['__domain'] = None
self.__dict__['__URL'] = ''
self.__dict__['__cookie'] = ''
self.__dict__['__writable'] = 0
self.__dict__['_html'] = vars(sys.modules['pyxml.dom.html'])
### Attribute Methods ###
def _get_URL(self):
return self.__dict__['__URL']
def _get_anchors(self):
        anchors = self.getElementsByTagName('A')
anchors = filter(lambda x: x._get_name(), anchors)
return implementation._4dom_createHTMLCollection(anchors)
def _get_applets(self):
al = self.getElementsByTagName('APPLET')
ol = self.getElementsByTagName('OBJECT')
ol = filter(lambda x: x._get_code(), ol)
return implementation._4dom_createHTMLCollection(al+ol)
def _get_body(self):
body = ''
#Try to find the body or FRAMESET
elements = self.getElementsByTagName('FRAMESET')
if not elements:
elements = self.getElementsByTagName('BODY')
if elements:
body = elements[0]
else:
#Create a body
body = self.createElement('BODY')
self.documentElement.appendChild(body)
return body
def _set_body(self, newBody):
elements = self.getElementsByTagName('FRAMESET')
if not elements:
elements = self.getElementsByTagName('BODY')
if elements:
# Replace the existing one
elements[0].parentNode.replaceChild(newBody, elements[0])
else:
# Add it
self.documentElement.appendChild(newBody)
def _get_cookie(self):
return self.__dict__['__cookie']
def _set_cookie(self, cookie):
self.__dict__['__cookie'] = cookie
def _get_domain(self):
return self.__dict__['__domain']
def _get_forms(self):
forms = self.getElementsByTagName('FORM')
return implementation._4dom_createHTMLCollection(forms)
def _get_images(self):
images = self.getElementsByTagName('IMG')
return implementation._4dom_createHTMLCollection(images)
def _get_links(self):
areas = self.getElementsByTagName('AREA')
anchors = self.getElementsByTagName('A')
links = filter(lambda x: x._get_href(), areas+anchors)
return implementation._4dom_createHTMLCollection(links)
def _get_referrer(self):
return self.__dict__['__referrer']
def _get_title(self):
elements = self.getElementsByTagName('TITLE')
if elements:
#Take the first
title = elements[0]
title.normalize()
if title.firstChild:
return title.firstChild.data
return ''
def _set_title(self, title):
# See if we can find the title
title_nodes = self.getElementsByTagName('TITLE')
if title_nodes:
title_node = title_nodes[0]
title_node.normalize()
if title_node.firstChild:
title_node.firstChild.data = title
return
else:
title_node = self.createElement('TITLE')
self._4dom_getHead().appendChild(title_node)
text = self.createTextNode(title)
title_node.appendChild(text)
### Methods ###
def close(self):
self.__dict__['__writable'] = 0
def getElementsByName(self, elementName):
return self._4dom_getElementsByAttribute('*', 'NAME', elementName)
def open(self):
#Clear out the doc
self.__dict__['__referrer'] = ''
self.__dict__['__domain'] = None
        self.__dict__['__URL'] = ''
self.__dict__['__cookie'] = ''
self.__dict__['__writable'] = 1
def write(self, st):
if not self.__dict__['__writable']:
return
#We need to parse the string here
        from pyxml.dom.ext.reader.HtmlLib import FromHtml
        d = FromHtml(st, self)
if d != self:
self.appendChild(d)
def writeln(self, st):
st = st + '\n'
self.write(st)
def getElementByID(self, ID):
hc = self._4dom_getElementsByAttribute('*','ID',ID)
if hc.length != 0:
return hc[0]
return None
### Overridden Methods ###
def createElement(self, tagName):
return self._4dom_createHTMLElement(tagName)
def createElementNS(self, namespace, qname):
return self._4dom_createHTMLElement(qname)
def createAttribute(self, name):
return Document.createAttribute(self, string.upper(name))
def createCDATASection(*args, **kw):
raise NotSupportedErr()
def createEntityReference(*args, **kw):
raise NotSupportedErr()
def createProcessingInstruction(*args, **kw):
raise NotSupportedErr()
def _4dom_createEntity(*args, **kw):
raise NotSupportedErr()
def _4dom_createNotation(*args, **kw):
raise NotSupportedErr()
### Internal Methods ###
def _4dom_getElementsByAttribute(self, tagName, attribute, attrValue=None):
nl = self.getElementsByTagName(tagName)
hc = implementation._4dom_createHTMLCollection()
for elem in nl:
attr = elem.getAttribute(attribute)
if attrValue == None and attr != '':
hc.append(elem)
elif attr == attrValue:
hc.append(elem)
return hc
def _4dom_getHead(self):
nl = self.getElementsByTagName('HEAD')
if not nl:
head = self.createElement('HEAD')
#The head goes in front of the body
body = self._get_body()
self.documentElement.insertBefore(head, body)
else:
head = nl[0]
return head
def _4dom_createHTMLElement(self, tagName):
lowered = string.lower(tagName)
if not HTML_DTD.has_key(lowered):
raise TypeError('Unknown HTML Element: %s' % tagName)
if lowered in NoClassTags:
from HTMLElement import HTMLElement
return HTMLElement(self, tagName)
#FIXME: capitalize() broken with unicode in Python 2.0
#normTagName = string.capitalize(tagName)
capitalized = string.upper(tagName[0]) + lowered[1:]
element = HTMLTagMap.get(capitalized, capitalized)
module = 'HTML%sElement' % element
if not self._html.has_key(module):
#Try to import it (should never fail)
__import__('pyxml.dom.html.%s' % module)
# Class and module have the same name
klass = getattr(self._html[module], module)
return klass(self, tagName)
def cloneNode(self, deep):
clone = HTMLDocument()
clone.__dict__['__referrer'] = self._get_referrer()
clone.__dict__['__domain'] = self._get_domain()
clone.__dict__['__URL'] = self._get_URL()
clone.__dict__['__cookie'] = self._get_cookie()
if deep:
if self.doctype is not None:
# Cannot have any children, no deep needed
dt = self.doctype.cloneNode(0)
clone._4dom_setDocumentType(dt)
if self.documentElement is not None:
# The root element can have children, duh
root = self.documentElement.cloneNode(1, newOwner=clone)
clone.appendChild(root)
return clone
def isXml(self):
return 0
def isHtml(self):
return 1
### Attribute Access Mappings ###
_readComputedAttrs = Document._readComputedAttrs.copy()
_readComputedAttrs.update ({
'title' : _get_title,
'referrer' : _get_referrer,
'domain' : _get_domain,
'URL' : _get_URL,
'body' : _get_body,
'images' : _get_images,
'applets' : _get_applets,
'links' : _get_links,
'forms' : _get_forms,
'anchors' : _get_anchors,
'cookie' : _get_cookie
})
_writeComputedAttrs = Document._writeComputedAttrs.copy()
_writeComputedAttrs.update ({
'title' : _set_title,
'body' : _set_body,
'cookie' : _set_cookie,
})
# Create the read-only list of attributes
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
Document._readOnlyAttrs + _readComputedAttrs.keys())
# HTML tags that don't map directly to a class name
HTMLTagMap = {'Isindex': 'IsIndex',
'Optgroup': 'OptGroup',
'Textarea': 'TextArea',
'Fieldset': 'FieldSet',
'Ul': 'UList',
'Ol': 'OList',
'Dl': 'DList',
'Dir': 'Directory',
'Li': 'LI',
'P': 'Paragraph',
'H1': 'Heading',
'H2': 'Heading',
'H3': 'Heading',
'H4': 'Heading',
'H5': 'Heading',
'H6': 'Heading',
'Q': 'Quote',
'Blockquote': 'Quote',
'Br': 'BR',
'Basefont': 'BaseFont',
'Hr': 'HR',
'A': 'Anchor',
'Img': 'Image',
'Caption': 'TableCaption',
'Col': 'TableCol',
'Colgroup': 'TableCol',
'Td': 'TableCell',
'Th': 'TableCell',
'Tr': 'TableRow',
'Thead': 'TableSection',
'Tbody': 'TableSection',
'Tfoot': 'TableSection',
'Frameset': 'FrameSet',
'Iframe': 'IFrame',
'Form': 'Form',
'Ins' : 'Mod',
'Del' : 'Mod',
}
#HTML Elements with no specific DOM Interface of their own
NoClassTags = ['sub',
'sup',
'span',
'bdo',
'tt',
'i',
'b',
'u',
's',
'strike',
'big',
'small',
'em',
'strong',
'dfn',
'code',
'samp',
'kbd',
'var',
'cite',
'acronym',
'abbr',
'dd',
'dt',
'noframes',
'noscript',
'address',
'center',
]
|
selfcommit/gaedav
|
pyxml/dom/html/HTMLDocument.py
|
Python
|
lgpl-2.1
| 11,651
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################
# _______ ______ #
# |_ _\ \ / / ___| #
# | | \ \ / /\___ \ #
# | | \ V / ___) | #
# |_| \_/ |____/ #
# #
###################################
# TVS DClone Tool #
# Version 1.0 #
# By: Teske Virtual Systems #
# This tool is release under #
# GPL license, for more #
# details see license.txt file #
###################################
# http://www.teske.net.br #
###################################
import commands
import subprocess
import re
import threading
import signal
import sys
import os
import gtk
import time
import urllib
import cgi
import math
from simplejson import dumps as to_json
from simplejson import loads as from_json
from webgui import start_gtk_thread
from webgui import launch_browser
from webgui import synchronous_gtk_message
from webgui import asynchronous_gtk_message
from webgui import kill_gtk_thread
disks = []
def LoadDisks():
global disks
x = commands.getstatusoutput("gksudo -D \"DClone Tool\" ./utils.sh")
if x[0] != 0 and x[0] != 256:
print "Este aplicativo precisa das permissões de administrador para funcionar!"
label = gtk.Label("Este aplicativo precisa de permissões de administrador para funcionar.")
dialog = gtk.Dialog("DClone Tool", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
dialog.vbox.pack_start(label)
label.show()
dialog.run()
dialog.destroy()
sys.exit(1)
dk = commands.getoutput("sudo ./utils.sh -g")
dk = dk.split(',')
for disk in dk:
dsk = commands.getoutput("sudo ./utils.sh -s "+disk)
model = commands.getoutput("sudo ./utils.sh -m "+disk)
dsk = dsk.split(' ')
print dsk
if dsk[1] == 'GB':
dsk[0] = float(dsk[0].replace(",",".")) * 1000
elif dsk[1] == 'KB':
dsk[0] = float(dsk[0].replace(",",".")) / 1000
else:
dsk[0] = float(dsk[0].replace(",","."))
dpk = (disk,dsk[0],model)
disks.append(dpk)
def buffered(f):
a = []
while True:
c = f.read(1)
if c == '':
break
elif c == '\r':
yield ''.join(a)
a = []
else:
a.append(c)
class dcfldd:
LINE_MATCH = re.compile(r'\[(.*)\% of (.*)Mb\] (.*) blocks \((.*)Mb\) written. (.*) remaining')
def __init__(self, diskfrom, diskto, totalsize):
global started_copy
if not started_copy:
cmdline = ['/usr/bin/sudo', '/usr/bin/dcfldd', 'sizeprobe=if', 'if='+diskfrom, 'of='+diskto]
print "Iniciando copia de "+diskfrom+" para "+diskto+" no total de "+str(totalsize)+" Mb"
self.process = subprocess.Popen(cmdline, stderr=subprocess.PIPE)
self.thread = threading.Thread(target=self.watch, args=[self.process.stderr])
self.thread.start()
started_copy = True
self.total = totalsize
def kill(self):
os.kill(self.process.pid, signal.SIGINT)
def watch(self, f):
global web_send
for line in buffered(f):
result = self.LINE_MATCH.match(line)
if result:
result = result.groups()
percent = result[0]
self.total = result[1]
mb = result[3]
time = result[4]
sys.stdout.write('%s Mb / %s Mb (%s%% restantes)\r' % (mb, self.total, percent))
sys.stdout.flush()
web_send('updateProgress('+str(mb)+','+str(self.total)+', "'+time+'");');
class Global(object):
quit = False
@classmethod
def set_quit(cls, *args, **kwargs):
cls.quit = True
def nl2br(string, is_xhtml= True ):
if is_xhtml:
return string.replace('\n','<br />')
else :
return string.replace('\n','<br>')
def main():
global disks
global browser
global web_send
global started_copy
global dcfprocess
global window
dcfprocess = None
start_gtk_thread()
started_copy = False
file = os.path.abspath('page.html')
uri = 'file://' + urllib.pathname2url(file)
browser, web_recv, web_send, window = synchronous_gtk_message(launch_browser)(uri,quit_function=Global.set_quit,echo=False,width=640,height=640)
browser.connect("navigation-requested", on_navigation_requested)
while not Global.quit:
time.sleep(1)
def ProcessDiskData(line):
linedata = line.split(None,6)
while len(linedata) < 7:
linedata.append('')
return linedata
def ProcessType(type):
return cgi.escape(type.replace('primary',"Primária").replace('extended',"Extendida").replace('logic',"Lógica"))
def BuildDiskDataHTML(data,disk):
diskdata = GetLoadedDiskData(disk)
base = 'Modelo: '+cgi.escape(diskdata[2])+'<BR>Tamanho total: '+str(diskdata[1])+' MB<BR><center><table width="502" border="0" cellpadding="0" cellspacing="0" style="color: #FFFFFF"> \
<tr> \
<th width="34" height="19" valign="top">ID</td> \
<th width="93" valign="top">Tamanho</td> \
<th width="106" valign="top">Tipo</td> \
<th width="160" valign="top">Sistema de Arquivos </td> \
<th width="109" valign="top">Sinalizador</td> \
</tr> '
dk = data.split('\n')
for line in dk:
id, inicio, fim, tamanho, tipo, fs, sig = ProcessDiskData(line)
base += '<tr><td height="19" valign="top"><center>'+id+'</center></td><td valign="top"><center>'+tamanho+'</center></td><td valign="top"><center>'+ ProcessType(tipo)+'</center></td><td valign="top"><center>'+fs.upper()+'</center></td><td valign="top"><center>'+sig+'</center></td></tr>'
base += '</table></center>'
return base.replace('\n','')
def OpenSaveFile():
global window
filename = None
chooser = gtk.FileChooserDialog("Salvar imagem", window, gtk.FILE_CHOOSER_ACTION_SAVE, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
response = chooser.run()
if response == gtk.RESPONSE_OK: filename = chooser.get_filename()
chooser.destroy()
return filename
def OpenLoadFile():
global window
filename = None
chooser = gtk.FileChooserDialog("Abrir imagem", None ,gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
chooser.set_modal(False)
response = chooser.run()
if response == gtk.RESPONSE_OK:
filename = chooser.get_filename()
chooser.destroy()
return filename
def GetLoadedDiskData(disk):
global disks
for dsk in disks:
if dsk[0] == disk:
return dsk
return (None,None,None)
def on_navigation_requested(view, frame, req, data=None):
global dcfprocess
uri = req.get_uri()
scheme, function, data =uri.split(':', 2)
if scheme == 'callback':
print uri
if function == '//loaddisks':
for disk in disks:
web_send('addDisk(\''+disk[0]+'\',\''+disk[2]+'\');');
#web_send('addDisk(\'RAW\',\'Arquivo\');');
elif function == '//loaddiskdata':
data = data.split(':')
disk_data = commands.getoutput("sudo ./utils.sh -d "+data[1])
#html_data = nl2br(cgi.escape(disk_data))
html_data = BuildDiskDataHTML(disk_data,data[1])
if data[0] == 'origem':
web_send('setDisk(\''+html_data+'\',true)');
else:
web_send('setDisk(\''+html_data+'\',false)');
elif function == '//startclone':
data = data.split(':')
origindata = GetLoadedDiskData(data[1])
print "Disco Origem: "
print origindata
destindata = GetLoadedDiskData(data[2])
print "Disco Destino: "
print destindata
print "Iniciando dcfldd para /dev/null"
dcfprocess = dcfldd(data[1],data[2], origindata[1])
elif function == '//selectfilesource':
filename = OpenLoadFile()
print filename
if not filename == None:
web_send('addDiskOrg(\''+filename+'\',\'RAW\');');
elif function == '//selectfiledestiny':
filename = OpenSaveFile()
print filename
if not filename == None:
web_send('addDiskDest(\''+filename+'\',\'RAW\');');
elif function == '//exit':
sys.exit(0)
return True
else:
return False
def my_quit_wrapper(fun):
signal.signal(signal.SIGINT, Global.set_quit)
def fun2(*args, **kwargs):
try:
x = fun(*args, **kwargs) # equivalent to "apply"
finally:
kill_gtk_thread()
Global.set_quit()
if dcfprocess != None:
dcfprocess.kill()
print "Saindo..."
return x
return fun2
if __name__ == '__main__': # <-- this line is optional
LoadDisks()
my_quit_wrapper(main)()
|
TeskeVirtualSystem/CloneInterface
|
clone.py
|
Python
|
gpl-2.0
| 8,341
|
#!/usr/bin/python
import sys
import signal
import logging
import daemon
import sniffer
class Service(daemon.Daemon):
"""
Usage:
service = Service(options)
service.run() # Interactive
service = Service(options)
service.start() # Daemon
"""
def __init__(self, options):
"""
Inits daemon.
"""
daemon.Daemon.__init__(self, options.pidfile)
self.options = options
def run(self):
"""
Main event loop.
"""
logging.info("Starting service.")
# Signal handler
def signal_handler(signum, frame):
"""
Terminates child processes.
"""
logging.info("Stopping service.")
try:
sys.exit(0)
except (OSError, AttributeError):
pass
signal.signal(signal.SIGINT, signal_handler)
sniffer.capture_process(self.options)
|
mkatircioglu/urlsniffer
|
urlsniffer/service.py
|
Python
|
gpl-2.0
| 992
|
import requests
import bs4
import webbrowser
import sys
# retrieve top search result links
print('googling...')
url = 'https://www.google.com'
print(url)
# res = requests.get(url)
proxies = {
'https': 'https://127.0.0.1:1080',
'http': 'http://127.0.0.1:1080'
}
# headers = {
# 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
# }
res = requests.get(url, proxies=proxies)
res.raise_for_status()
# open a browser tab for each result
soup = bs4.BeautifulSoup(res.text, 'html.parser')
linkElems = soup.select('.footborder')
print(linkElems)
numOpen = min(5, len(linkElems))
# for i in range(numOpen):
# webbrowser.open('https://www.google.com' + linkElems[i].get('href'))
|
sallyyoo/ced2
|
py/practice/11/testLucky.py
|
Python
|
mit
| 768
|
# coding:utf-8
"""
# decorator_run.py
#
# Copyright(C) by AbsentM. 2018
#
# Author: AbsentM
# Date: 2018/02/10
#
# Description:
# Use decorator function to execute simple run flow.
#
# decorator_run.py == simple_sun.py
#
"""
def wrapper(func):
"""
    Define a wrapper function that takes another function as its parameter.
:param func: A function as param
:return: A new function
"""
def inner_func(*args, **kwargs):
"""
        The real inner function that runs the wrapped (parameter) function.
:param args: default args
:param kwargs: default more args
:return: None
"""
print "Entering function "
func(*args, **kwargs)
print "Exiting function"
return inner_func
@wrapper
def show_message():
"""
Define a function to show some info msg.
:return: None
"""
print "Hello everyone!"
def decorator_main_run():
"""
Main test function
:return: None
"""
print "----------------------------------"
show_message()
print "----------------------------------"
if __name__ == '__main__':
decorator_main_run()
|
absentm/Demo
|
Python-demo/decorator-demo/decorator_run.py
|
Python
|
mit
| 1,023
|
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.task.Task import Task
from direct.interval.IntervalGlobal import *
from TrolleyConstants import *
from toontown.golf import GolfGlobals
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.distributed import DelayDelete
from toontown.toonbase.ToontownTimer import ToontownTimer
from direct.task.Task import Task
from direct.showbase import PythonUtil
from toontown.toon import ToonDNA
from direct.showbase import RandomNumGen
from toontown.battle.BattleSounds import *
class DistributedPicnicBasket(DistributedObject.DistributedObject):
seatState = Enum('Empty, Full, Eating')
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPicnicBasket')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.localToonOnBoard = 0
self.seed = 0
self.random = None
self.picnicCountdownTime = base.config.GetFloat('picnic-countdown-time', ToontownGlobals.PICNIC_COUNTDOWN_TIME)
self.picnicBasketTrack = None
self.fsm = ClassicFSM.ClassicFSM('DistributedTrolley', [State.State('off', self.enterOff, self.exitOff, ['waitEmpty', 'waitCountdown']), State.State('waitEmpty', self.enterWaitEmpty, self.exitWaitEmpty, ['waitCountdown']), State.State('waitCountdown', self.enterWaitCountdown, self.exitWaitCountdown, ['waitEmpty'])], 'off', 'off')
self.fsm.enterInitialState()
self.__toonTracks = {}
return
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.loader = self.cr.playGame.hood.loader
self.foodLoader = ['phase_6/models/golf/picnic_sandwich.bam',
'phase_6/models/golf/picnic_apple.bam',
'phase_6/models/golf/picnic_cupcake.bam',
'phase_6/models/golf/picnic_chocolate_cake.bam']
self.fullSeat = []
self.food = []
for i in xrange(4):
self.food.append(None)
self.fullSeat.append(self.seatState.Empty)
self.picnicItem = 0
return
def announceGenerate(self):
self.picnicTable = self.loader.geom.find('**/*picnic_table_' + str(self.tableNumber))
self.picnicTableSphereNodes = []
self.numSeats = 4
self.seats = []
self.jumpOffsets = []
self.basket = None
for i in xrange(self.numSeats):
self.seats.append(self.picnicTable.find('**/*seat%d' % (i + 1)))
self.jumpOffsets.append(self.picnicTable.find('**/*jumpOut%d' % (i + 1)))
self.tablecloth = self.picnicTable.find('**/basket_locator')
DistributedObject.DistributedObject.announceGenerate(self)
for i in xrange(self.numSeats):
self.picnicTableSphereNodes.append(self.seats[i].attachNewNode(CollisionNode('picnicTable_sphere_%d_%d' % (self.getDoId(), i))))
self.picnicTableSphereNodes[i].node().addSolid(CollisionSphere(0, 0, 0, 2))
self.tableclothSphereNode = self.tablecloth.attachNewNode(CollisionNode('tablecloth_sphere'))
self.tableclothSphereNode.node().addSolid(CollisionSphere(0, 0, -1, 4))
angle = self.startingHpr[0]
angle -= 90
radAngle = deg2Rad(angle)
unitVec = Vec3(math.cos(radAngle), math.sin(radAngle), 0)
unitVec *= 30.0
self.endPos = self.startingPos + unitVec
dist = Vec3(self.endPos - self.enteringPos).length()
wheelAngle = dist / (0.5 * 1.4 * math.pi) * 360
self.seatNumber = 0
self.clockNode = ToontownTimer()
self.clockNode.setPos(1.16, 0, -0.83)
self.clockNode.setScale(0.3)
self.clockNode.hide()
return
def disable(self):
DistributedObject.DistributedObject.disable(self)
self.fsm.request('off')
self.clearToonTracks()
for i in xrange(self.numSeats):
del self.picnicTableSphereNodes[0]
del self.picnicTableSphereNodes
self.notify.debug('Deleted self loader ' + str(self.getDoId()))
self.picnicTable.removeNode()
self.picnicBasketTrack = None
return
def delete(self):
self.notify.debug('Golf kart getting deleted: %s' % self.getDoId())
DistributedObject.DistributedObject.delete(self)
del self.fsm
def setState(self, state, seed, timestamp):
self.seed = seed
if not self.random:
self.random = RandomNumGen.RandomNumGen(seed)
self.fsm.request(state, [globalClockDelta.localElapsedTime(timestamp)])
def handleEnterPicnicTableSphere(self, i, collEntry):
self.seatNumber = i
self.notify.debug('Entering Picnic Table Sphere.... %s' % self.getDoId())
self.loader.place.detectedPicnicTableSphereCollision(self)
def handleEnterPicnicTable(self, i):
toon = base.localAvatar
self.sendUpdate('requestBoard', [i])
def fillSlot0(self, avId):
self.fillSlot(0, avId)
def fillSlot1(self, avId):
self.fillSlot(1, avId)
def fillSlot2(self, avId):
self.fillSlot(2, avId)
def fillSlot3(self, avId):
self.fillSlot(3, avId)
def fillSlot(self, index, avId):
self.notify.debug('fill Slot: %d for %d' % (index, avId))
if avId == 0:
pass
else:
self.fullSeat[index] = self.seatState.Full
if avId == base.localAvatar.getDoId():
self.clockNode.show()
if index == 0 or index == 3:
side = -1
else:
side = 1
if hasattr(self.loader.place, 'trolley'):
self.loader.place.trolley.fsm.request('boarding', [self.tablecloth, side])
else:
self.notify.warning('fillSlot no trolley in place')
self.localToonOnBoard = 1
if avId == base.localAvatar.getDoId():
if hasattr(self.loader.place, 'trolley'):
self.loader.place.trolley.fsm.request('boarded')
self.loader.place.trolley.exitButton.hide()
if avId in self.cr.doId2do:
toon = self.cr.doId2do[avId]
toon.stopSmooth()
toon.wrtReparentTo(self.tablecloth)
sitStartDuration = toon.getDuration('sit-start')
jumpTrack = self.generateToonJumpTrack(toon, index)
track = Sequence(jumpTrack, Func(toon.setAnimState, 'Sit', 1.0))
self.notify.debug('### fillSlot: fullSeat = %s' % self.fullSeat)
if self.fullSeat.count(0) == 3:
self.notify.debug('### fillSlot: adding basketAppear')
if self.picnicBasketTrack:
self.picnicBasketTrack.finish()
waitDuration = track.getDuration()
self.picnicBasketTrack = Sequence(Wait(waitDuration), self.generateBasketAppearTrack())
self.picnicBasketTrack.start()
track.append(self.generateFoodAppearTrack(index))
track.append(Sequence(Func(self.clearToonTrack, avId), name=toon.uniqueName('fillTrolley'), autoPause=1))
if avId == base.localAvatar.getDoId():
if hasattr(self.loader.place, 'trolley'):
track.append(Func(self.loader.place.trolley.exitButton.show))
track.delayDelete = DelayDelete.DelayDelete(toon, 'PicnicBasket.fillSlot')
self.storeToonTrack(avId, track)
track.start()
def emptySlot0(self, avId, timestamp):
self.emptySlot(0, avId, timestamp)
def emptySlot1(self, avId, timestamp):
self.emptySlot(1, avId, timestamp)
def emptySlot2(self, avId, timestamp):
self.emptySlot(2, avId, timestamp)
def emptySlot3(self, avId, timestamp):
self.emptySlot(3, avId, timestamp)
def notifyToonOffTrolley(self, toon):
toon.setAnimState('neutral', 1.0)
if hasattr(base, 'localAvatar') and toon == base.localAvatar:
if hasattr(self.loader.place, 'trolley'):
self.loader.place.trolley.handleOffTrolley()
self.localToonOnBoard = 0
else:
toon.startSmooth()
def emptySlot(self, index, avId, timestamp):
def emptySeat(index):
self.notify.debug('### seat %s now empty' % index)
self.fullSeat[index] = self.seatState.Empty
if avId == 0:
pass
elif avId == 1:
self.fullSeat[index] = self.seatState.Empty
track = Sequence(self.generateFoodDisappearTrack(index))
            self.notify.debug('### empty slot - unexpected: fullSeat = %s' % self.fullSeat)
if self.fullSeat.count(0) == 4:
self.notify.debug('### empty slot - unexpected: losing basket')
if self.picnicBasketTrack:
self.picnicBasketTrack.finish()
waitDuration = track.getDuration()
self.picnicBasketTrack = Sequence(Wait(waitDuration), self.generateBasketDisappearTrack())
self.picnicBasketTrack.start()
track.start()
else:
self.fullSeat[index] = self.seatState.Empty
if avId in self.cr.doId2do:
if avId == base.localAvatar.getDoId():
if self.clockNode:
self.clockNode.hide()
toon = self.cr.doId2do[avId]
toon.stopSmooth()
sitStartDuration = toon.getDuration('sit-start')
jumpOutTrack = self.generateToonReverseJumpTrack(toon, index)
track = Sequence(jumpOutTrack)
track.append(self.generateFoodDisappearTrack(index))
self.notify.debug('### empty slot: fullSeat = %s' % self.fullSeat)
if self.fullSeat.count(0) == 4:
self.notify.debug('### empty slot: losing basket')
if self.picnicBasketTrack:
self.picnicBasketTrack.finish()
waitDuration = track.getDuration()
self.picnicBasketTrack = Sequence(Wait(waitDuration), self.generateBasketDisappearTrack())
self.picnicBasketTrack.start()
track.append(Sequence(Func(self.notifyToonOffTrolley, toon), Func(self.clearToonTrack, avId), Func(self.doneExit, avId), Func(emptySeat, index), name=toon.uniqueName('emptyTrolley'), autoPause=1))
track.delayDelete = DelayDelete.DelayDelete(toon, 'PicnicBasket.emptySlot')
self.storeToonTrack(avId, track)
track.start()
def rejectBoard(self, avId):
self.loader.place.trolley.handleRejectBoard()
def __enableCollisions(self):
for i in xrange(self.numSeats):
self.accept('enterpicnicTable_sphere_%d_%d' % (self.getDoId(), i), self.handleEnterPicnicTableSphere, [i])
self.accept('enterPicnicTableOK_%d_%d' % (self.getDoId(), i), self.handleEnterPicnicTable, [i])
self.picnicTableSphereNodes[i].setCollideMask(ToontownGlobals.WallBitmask)
def __disableCollisions(self):
for i in xrange(self.numSeats):
self.ignore('enterpicnicTable_sphere_%d_%d' % (self.getDoId(), i))
self.ignore('enterPicnicTableOK_%d_%d' % (self.getDoId(), i))
for i in xrange(self.numSeats):
self.picnicTableSphereNodes[i].setCollideMask(BitMask32(0))
def enterOff(self):
return None
def exitOff(self):
return None
def enterWaitEmpty(self, ts):
self.__enableCollisions()
def exitWaitEmpty(self):
self.__disableCollisions()
def enterWaitCountdown(self, ts):
self.__enableCollisions()
self.accept('trolleyExitButton', self.handleExitButton)
self.clockNode.countdown(self.picnicCountdownTime, self.handleExitButton)
def handleExitButton(self):
self.sendUpdate('requestExit')
self.clockNode.hide()
def exitWaitCountdown(self):
self.__disableCollisions()
self.ignore('trolleyExitButton')
self.clockNode.reset()
def getStareAtNodeAndOffset(self):
return (self.tablecloth, Point3(0, 0, 4))
def storeToonTrack(self, avId, track):
self.clearToonTrack(avId)
self.__toonTracks[avId] = track
def clearToonTrack(self, avId):
oldTrack = self.__toonTracks.get(avId)
if oldTrack:
oldTrack.pause()
DelayDelete.cleanupDelayDeletes(oldTrack)
del self.__toonTracks[avId]
def clearToonTracks(self):
keyList = []
for key in self.__toonTracks:
keyList.append(key)
for key in keyList:
if key in self.__toonTracks:
self.clearToonTrack(key)
def doneExit(self, avId):
if avId == base.localAvatar.getDoId():
self.sendUpdate('doneExit')
def setPosHpr(self, x, y, z, h, p, r):
self.startingPos = Vec3(x, y, z)
self.enteringPos = Vec3(x, y, z - 10)
self.startingHpr = Vec3(h, 0, 0)
def setTableNumber(self, tn):
self.tableNumber = tn
def generateToonJumpTrack(self, av, seatIndex):
av.pose('sit', 47)
hipOffset = av.getHipsParts()[2].getPos(av)
def getToonJumpTrack(av, seatIndex):
def getJumpDest(av = av, node = self.tablecloth):
dest = Vec3(self.tablecloth.getPos(av.getParent()))
seatNode = self.picnicTable.find('**/seat' + str(seatIndex + 1))
dest += seatNode.getPos(self.tablecloth)
dna = av.getStyle()
dest -= hipOffset
if seatIndex == 2 or seatIndex == 3:
dest.setY(dest.getY() + 2 * hipOffset.getY())
dest.setZ(dest.getZ() + 0.2)
return dest
def getJumpHpr(av = av, node = self.tablecloth):
hpr = self.seats[seatIndex].getHpr(av.getParent())
angle = PythonUtil.fitDestAngle2Src(av.getH(), hpr.getX())
hpr.setX(angle)
return hpr
toonJumpTrack = Parallel(ActorInterval(av, 'jump'), Sequence(Wait(0.43), Parallel(LerpHprInterval(av, hpr=getJumpHpr, duration=0.9), ProjectileInterval(av, endPos=getJumpDest, duration=0.9))))
return toonJumpTrack
def getToonSitTrack(av):
toonSitTrack = Sequence(ActorInterval(av, 'sit-start'), Func(av.loop, 'sit'))
return toonSitTrack
toonJumpTrack = getToonJumpTrack(av, seatIndex)
toonSitTrack = getToonSitTrack(av)
jumpTrack = Sequence(Parallel(toonJumpTrack, Sequence(Wait(1), toonSitTrack)), Func(av.wrtReparentTo, self.tablecloth))
return jumpTrack
def generateToonReverseJumpTrack(self, av, seatIndex):
self.notify.debug('av.getH() = %s' % av.getH())
def getToonJumpTrack(av, destNode):
def getJumpDest(av = av, node = destNode):
dest = node.getPos(self.tablecloth)
dest += self.jumpOffsets[seatIndex].getPos(self.tablecloth)
return dest
def getJumpHpr(av = av, node = destNode):
hpr = node.getHpr(av.getParent())
hpr.setX(hpr.getX() + 180)
angle = PythonUtil.fitDestAngle2Src(av.getH(), hpr.getX())
hpr.setX(angle)
return hpr
toonJumpTrack = Parallel(ActorInterval(av, 'jump'), Sequence(Wait(0.1), Parallel(ProjectileInterval(av, endPos=getJumpDest, duration=0.9))))
return toonJumpTrack
toonJumpTrack = getToonJumpTrack(av, self.tablecloth)
jumpTrack = Sequence(toonJumpTrack, Func(av.loop, 'neutral'), Func(av.wrtReparentTo, render))
return jumpTrack
def generateBasketAppearTrack(self):
if self.basket == None:
self.basket = loader.loadModel('phase_6/models/golf/picnic_basket.bam')
self.basket.setScale(0.1)
basketTrack = Sequence(
Func(self.basket.show),
SoundInterval(
globalBattleSoundCache.getSound('GUI_balloon_popup.ogg'),
node=self.basket),
Func(self.basket.reparentTo, self.tablecloth),
Func(self.basket.setPos, 0, 0, 0.2),
Func(self.basket.setHpr, 45, 0, 0),
Func(self.basket.wrtReparentTo, render),
Func(self.basket.setShear, 0, 0, 0),
Sequence(
LerpScaleInterval(
self.basket,
scale=Point3(1.1, 1.1, 0.1),
duration=0.2),
LerpScaleInterval(
self.basket,
scale=Point3(1.6, 1.6, 0.2),
duration=0.1),
LerpScaleInterval(
self.basket,
scale=Point3(1.0, 1.0, 0.4),
duration=0.1),
LerpScaleInterval(
self.basket,
scale=Point3(1.5, 1.5, 2.5),
duration=0.2),
LerpScaleInterval(
self.basket,
scale=Point3(2.5, 2.5, 1.5),
duration=0.1),
LerpScaleInterval(
self.basket,
scale=Point3(2.0, 2.0, 2.0),
duration=0.1),
Func(self.basket.wrtReparentTo, self.tablecloth),
Func(self.basket.setPos, 0, 0, 0)))
return basketTrack
def generateBasketDisappearTrack(self):
if not self.basket:
return Sequence()
pos = self.basket.getPos()
pos.addZ(-1)
basketTrack = Sequence(
LerpScaleInterval(
self.basket,
scale=Point3(2.0, 2.0, 1.8),
duration=0.1),
LerpScaleInterval(
self.basket,
scale=Point3(1.0, 1.0, 2.5),
duration=0.1),
LerpScaleInterval(
self.basket,
scale=Point3(2.0, 2.0, 0.5),
duration=0.2),
LerpScaleInterval(
self.basket,
scale=Point3(0.5, 0.5, 1.0),
duration=0.1),
LerpScaleInterval(
self.basket,
scale=Point3(1.1, 1.1, 0.1),
duration=0.1),
LerpScaleInterval(
self.basket,
scale=Point3(0.1, 0.1, 0.1),
duration=0.2),
SoundInterval(
globalBattleSoundCache.getSound('GUI_balloon_popup.ogg'),
node=self.basket),
Wait(0.2),
LerpPosInterval(
self.basket,
pos=pos,
duration=0.2),
Func(self.basket.hide))
return basketTrack
def generateFoodAppearTrack(self, seat):
if self.fullSeat[seat] == self.seatState.Full:
self.notify.debug('### food appear: self.fullSeat = %s' % self.fullSeat)
if not self.food[seat]:
self.food[seat] = loader.loadModel(self.random.choice(self.foodLoader))
self.notify.debug('### food appear: self.food = %s' % self.food)
self.food[seat].setScale(0.1)
self.food[seat].reparentTo(self.tablecloth)
self.food[seat].setPos(self.seats[seat].getPos(self.tablecloth)[0] / 2, self.seats[seat].getPos(self.tablecloth)[1] / 2, 0)
foodTrack = Sequence(
Func(self.food[seat].show),
SoundInterval(
globalBattleSoundCache.getSound('GUI_balloon_popup.ogg'),
node=self.food[seat]),
Func(self.food[seat].reparentTo, self.tablecloth),
Func(self.food[seat].setHpr, 45, 0, 0),
Func(self.food[seat].wrtReparentTo, render),
Func(self.food[seat].setShear, 0, 0, 0),
Sequence(
LerpScaleInterval(
self.food[seat],
scale=Point3(1.1, 1.1, 0.1),
duration=0.2),
LerpScaleInterval(
self.food[seat],
scale=Point3(1.6, 1.6, 0.2),
duration=0.1),
LerpScaleInterval(
self.food[seat],
scale=Point3(1.0, 1.0, 0.4),
duration=0.1),
LerpScaleInterval(
self.food[seat],
scale=Point3(1.5, 1.5, 2.5),
duration=0.2),
LerpScaleInterval(
self.food[seat],
scale=Point3(2.5, 2.5, 1.5),
duration=0.1),
LerpScaleInterval(
self.food[seat],
scale=Point3(2.0, 2.0, 2.0),
duration=0.1),
Func(self.food[seat].wrtReparentTo, self.tablecloth)))
return foodTrack
else:
return Sequence()
def generateFoodDisappearTrack(self, seat):
if not self.food[seat]:
return Sequence()
pos = self.food[seat].getPos()
pos.addZ(-1.0)
foodTrack = Sequence(
LerpScaleInterval(
self.food[seat],
scale=Point3(2.0, 2.0, 1.8),
duration=0.1),
LerpScaleInterval(
self.food[seat],
scale=Point3(1.0, 1.0, 2.5),
duration=0.1),
LerpScaleInterval(
self.food[seat],
scale=Point3(2.0, 2.0, 0.5),
duration=0.2),
LerpScaleInterval(
self.food[seat],
scale=Point3(0.5, 0.5, 1.0),
duration=0.1),
LerpScaleInterval(
self.food[seat],
scale=Point3(1.1, 1.1, 0.1),
duration=0.1),
LerpScaleInterval(
self.food[seat],
scale=Point3(0.1, 0.1, 0.1),
duration=0.2),
SoundInterval(
globalBattleSoundCache.getSound('GUI_balloon_popup.ogg'),
node=self.food[seat]),
Wait(0.2),
LerpPosInterval(
self.food[seat],
pos=pos,
duration=0.2),
Func(self.food[seat].hide))
return foodTrack
def destroy(self, node):
node.removeNode()
node = None
self.basket.removeNode()
self.basket = None
for food in self.food:
food.removeNode()
self.food = None
self.clockNode.removeNode()
del self.clockNode
self.clockNode = None
return
def setPicnicDone(self):
if self.localToonOnBoard:
if hasattr(self.loader.place, 'trolley'):
self.loader.place.trolley.fsm.request('final')
self.loader.place.trolley.fsm.request('start')
self.localToonOnBoard = 0
messenger.send('picnicDone')
|
Spiderlover/Toontown
|
toontown/safezone/DistributedPicnicBasket.py
|
Python
|
mit
| 23,489
|
#! /usr/bin/env python
# Copyright (c) 2010-2013 Magnus Olsson (magnus@minimum.se)
# See LICENSE for details
"""awsxd - AWS-X GSM weather station daemon
This application implements an server for reception of AWS-X GSM weather
station UDP packets. For each received packet, the contents will be decoded,
verified and inserted into the given MySQL database.
For details on the sensor hardware, see the DSP Promatic webpage at:
http://www.dps-promatic.com/
For details on the GPRS functionality, check:
http://www.dpspro.com/aws_gprs.html
http://www.dps-promatic.com/tcs_meteo_packet.html
usage: awsxd [options]
options:
-p <port> Server listen port number (default 9999)
-h <host> Server listen address (defaults to localhost)
-v Verbose output (may be used multiple times)
-s <str> Simulate <str> input, process and exit
-c <path> Launch <path> for each processed packet (station name passed as arg)
-f <path> Config file (defaults to 'awsxd.conf')
-r <ip[:port]> Replicate valid packets to given host and port (UDP)
-i <pidfile> Process ID file (defaults to '/tmp/awsxd.pid')
-m <unit> Windspeed unit ('kmh' (default), 'knots' or 'ms')
"""
import SocketServer
import getopt
import sys
import MySQLdb
import os.path
import subprocess
import ConfigParser
import socket
global verbose
global callback
global config
global fwhost
global fwport
global speedmultiplier
class NMEAException(Exception):
pass
class NMEASentence:
def __init__(self, str):
left = str.find("$")
if (left == -1):
raise NMEAException("Invalid packet (no $ present)")
right = str.find("*")
if (right == -1):
raise NMEAException("Invalid packet (no * present)")
self.payload = str[left+1:right]
actual_csum = self.checksum()
expected_csum = int(str[right+1:], 16)
if (actual_csum != expected_csum):
raise NMEAException("Checksum mismatch (0x%02X != 0x%02X)" % (actual_csum, expected_csum))
self.fields = str[left+1:right].split(',')
def checksum(self):
actual_checksum = 0
for c in self.payload:
actual_checksum = actual_checksum ^ ord(c)
return actual_checksum
def encode(self):
return "$%s*%02X" % (self.payload, self.checksum())
class AWSException(Exception):
pass
class AWSPacket(NMEASentence):
def _parseDec(value):
return int(value, 10)
def _parseString(value):
return value
def _parseDate(value):
return value
def _parseTime(value):
return value
def _parseFloat(value):
return float(value)
_awsFields = [
# Packet header, always 'DPTAW'
{ 'tag': "header", 'f': _parseString },
# Date in yyyy/mm/dd format
{ 'tag': "date", 'f': _parseDate },
# Time in hh:mm (24h) format
{ 'tag': "time", 'f': _parseTime },
# Station ID (10)
{ 'tag': "id", 'f': _parseString },
# SMS serial number (SMS counter)
{ 'tag': "smsc", 'f': _parseDec },
# Sample interval
{ 'tag': "si", 'f': _parseFloat },
# Wind average speed ('si' period, 1 sample/second)
{ 'tag': "was", 'f': _parseFloat },
# Air pressure (millibars)
{ 'tag': "wssd", 'f': _parseFloat },
# Minimum wind speed ('si' period, 1 sample/second)
{ 'tag': "wmins", 'f': _parseFloat },
# Max wind gust (3s gusts) ('si' period, 1 sample/second)
{ 'tag': "wgust", 'f': _parseFloat },
# Daily gust (maximum gust of the day)
{ 'tag': "dwgust", 'f': _parseFloat },
# Leaf wetness
{ 'tag': "leaf_wetness", 'f': _parseDec },
# Average wind direction ('si' period, 1 sample/second)
{ 'tag': "wdir", 'f': _parseDec },
# Wind direction, standard deviation
{ 'tag': "wdsd", 'f': _parseDec },
# Solar radiation
{ 'tag': "sun", 'f': _parseDec },
# Average temperature ('si' period, 1 sample/second)
{ 'tag': "temp", 'f': _parseFloat },
# Daily minimum temperature
{ 'tag': "dmintemp", 'f': _parseFloat },
# Daily maximum temperature
{ 'tag': "dmaxtemp", 'f': _parseFloat },
# Soil temperature
{ 'tag': "soilt", 'f': _parseFloat },
# Rainfall
{ 'tag': "rf", 'f': _parseFloat },
# Daily rainfall
{ 'tag': "drf", 'f': _parseFloat },
# Soil water potential
{ 'tag': "soilw", 'f': _parseDec },
# Dew point
{ 'tag': "dp", 'f': _parseFloat },
# Relative humidity
{ 'tag': "rh", 'f': _parseFloat },
# Daily minimum relative humidity
{ 'tag': "dminrh", 'f': _parseFloat },
# Daily maximum relative humidity
{ 'tag': "dmaxrh", 'f': _parseFloat },
# Power supply type (E=External/Solar, B=Battery)
{ 'tag': "pwtype", 'f': _parseString },
# Battery voltage
{ 'tag': "battvolt", 'f': _parseFloat },
# Dummy (trailing comma -- always blank)
{ 'tag': "blank", 'f': _parseString }
]
def __init__(self, str):
NMEASentence.__init__(self, str)
if (len(self.fields) != len(self._awsFields)):
raise AWSException("Invalid fieldcount (%d)" % len(self.fields))
if (self.fields[0] != "DPTAW"):
raise AWSException("Unknown packet type %s" % self.fields[0])
self._awsValues = {}
try:
for idx, field in enumerate(self._awsFields):
self._awsValues[field["tag"]] = field["f"](self.fields[idx])
except Exception as x:
raise AWSException("Parse error for %s: %s (%s)" % (field["tag"], self.fields[idx], x))
def __str__(self):
return self._awsValues.__str__()
def get(self, field):
if field in self._awsValues:
return self._awsValues[field]
else:
return None
def usage(*args):
sys.stdout = sys.stderr
print __doc__
for msg in args: print msg
sys.exit(2)
def log(str):
if (verbose > 0):
print str
def dbg(str):
if (verbose > 1):
print str
def run_callback(packet):
ret = subprocess.call([callback, packet.get('id')])
if (ret):
print "Callback '%s' failed with retcode %d" % (callback, ret)
def forward_packet(pkt):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(pkt.encode(), (fwhost, fwport))
dbg("Packet replicated for %s:%d." % (fwhost, fwport))
sock.close()
def insert_database(pkt):
db = MySQLdb.connect(host=config.get('mysql', 'dbhost'),
user=config.get('mysql', 'dbuser'),
passwd=config.get('mysql', 'dbpass'),
db=config.get('mysql', 'dbname'))
cur = db.cursor()
q = """INSERT INTO awsx
(
tstamp, station, sms_counter, sample_interval, wind_avg,
wind_min, wind_max, wind_daily_max, wind_dir, wind_stability,
air_pressure, leaf_wetness, sun_radiation, temp_avg, temp_daily_min,
temp_daily_max, soil_temp, rainfall, rainfall_daily, soil_moisture,
dewpoint, humidity, humidity_daily_min, humidity_daily_max, power_supply,
battery_voltage
)
VALUES (
%s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
%s
)"""
values = (pkt.get('date') + " " + pkt.get('time'),
pkt.get('id'),
pkt.get('smsc'),
pkt.get('si'),
pkt.get('was')*speedmultiplier,
pkt.get('wmins')*speedmultiplier,
pkt.get('wgust')*speedmultiplier,
pkt.get('dwgust')*speedmultiplier,
pkt.get('wdir'),
pkt.get('wdsd'),
pkt.get('wssd'),
pkt.get('leaf_wetness'),
pkt.get('sun'),
pkt.get('temp'),
pkt.get('dmintemp'),
pkt.get('dmaxtemp'),
pkt.get('soilt'),
pkt.get('rf'),
pkt.get('drf'),
pkt.get('soilw'),
pkt.get('dp'),
pkt.get('rh'),
pkt.get('dminrh'),
pkt.get('dmaxrh'),
pkt.get('pwtype'),
pkt.get('battvolt'))
dbg(q % values)
if (not cur.execute(q, values)):
expanded_q = q % values
log("Failed to insert record: '%s'" % expanded_q)
return False
return True
def process(str, source = None):
if source is not None:
dbg("Received from %s" % source)
dbg("Processing %d bytes: %s" % (len(str), str))
try:
packet = AWSPacket(str)
log(packet)
insert_database(packet)
if fwhost:
forward_packet(packet)
if callback:
run_callback(packet)
except Exception as e:
print(e)
class AWSHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request[0]
ip = self.client_address[0]
process(data, ip)
if __name__ == "__main__":
config = ConfigParser.RawConfigParser({'dbhost': 'localhost',
'dbpass': '',
'dbuser': 'awsxd',
'dbname': 'awsxd'})
pidfile = "/tmp/awsxd.pid"
host = "localhost"
port = 9999
verbose = 0
callback = None
cfgfile = "awsxd.conf"
simstr = False
fwhost = None
fwport = None
speedmultiplier = 1/3.6
try:
opts, args = getopt.getopt(sys.argv[1:], 'p:h:vs:c:f:r:i:m:')
except getopt.error, msg:
usage(msg)
for o, a in opts:
if o == '-p': port = int(a)
if o == '-m':
if a == 'kmh':
speedmultiplier = 1/3.6
elif a == 'knots':
speedmultiplier = 0.514
elif a == 'ms':
speedmultiplier = 1.0
else:
print "Invalid unit '%s'" % a
sys.exit(1)
if o == '-v': verbose = verbose + 1
if o == '-h': host = a
if o == '-f': cfgfile = a
if o == '-s': simstr = a
if o == '-i': pidfile = a
if o == '-r':
hostport = a.split(':')
if len(hostport) > 2:
print "Invalid replication (-x) host '%s', aborting." % a
sys.exit(1)
fwhost = hostport[0]
if len(hostport) == 2:
fwport = int(hostport[1])
if o == '-c':
if (not os.path.isfile(a)):
print "No such callback file '%s', aborting." % a
sys.exit(1)
if (not os.access(a, os.X_OK)):
print "Specified callback file '%s' is not an executable." % a
sys.exit(1)
callback = a;
log("Using config '%s'" % cfgfile)
config.read(cfgfile)
if (fwhost != None):
if (fwport == None):
fwport = port
log("Replicating packets to %s:%d." % (fwhost, fwport))
if (simstr):
log("Simulating input: %s" % simstr)
process(simstr)
else:
pid = str(os.getpid())
if os.path.isfile(pidfile):
print "%s already exists, exiting." % pidfile
sys.exit(1)
file(pidfile, 'w').write(pid)
log("Listening at %s:%d" % (host, port))
try:
server = SocketServer.UDPServer((host, port), AWSHandler)
server.serve_forever()
except:
os.unlink(pidfile)
raise
|
WindWiz/awsx-daemon
|
awsxd.py
|
Python
|
gpl-3.0
| 11,645
|
"""
WSGI config for apiDamificados project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
settingsEnv = "apiDamificados.settings." + os.environ.get('DJANGO_ENV')
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apiDamificados.settings.development")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settingsEnv)
application = get_wsgi_application()
|
ramrodo/devfbackend-api
|
apiDamificados/apiDamificados/wsgi.py
|
Python
|
mit
| 552
|
from django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag
def settings_value(name):
return getattr(settings, name, "")
@register.assignment_tag
def is_topology_model(model):
return hasattr(model, 'kind') and hasattr(model, 'offset')
|
mabhub/Geotrek
|
geotrek/common/templatetags/geotrek_tags.py
|
Python
|
bsd-2-clause
| 304
|
"""
Finding isomorphic/canonical representations of the flop.
In Texas Holdem suits have no intrinsic value. Thus, if the flop is "2d 3d 4d",
the possible holecards "5c 6c", "5h 6h", and "5s 6s" are equivalent -- they are
each a straight and the suits are irrelevant. Similarly, the flop "4d 3d 2d" is
equivalent to the flop above -- the order of cards is irrelevant. This fact may
be used to dramatically reduce the number of lookup tables required to store
flop equity data; there are 22,100 possible flops, but only 1,755 canonical
versions.
In this module we develop an algorithm for making a list of canonical flops and
also a way to find, for any given randomly generated flop, what its canonical
form is.
For completeness I also include what is, loosely speaking, the opposite
function: a way to produce all suit-isomorphs of a given flop.
Finally, you will find a way to map the suits on the current flop to the suits
on its canonical version. This will allow us to translate the rest of the
current scenario, including our holecards.
"""
from itertools import combinations
from pokertools import CARDS, SUITS
#------------------------------------------------------------------------------
# Finding the Canonical Version
#
# The position-isomorphic aspect of this problem is easy to solve: we can
# simply specify that canonical flops must be sorted. The suit-isomorphic
# aspect is a little more difficult. Suits may appear on the flop in five
# patterns: 'AAA', 'AAB', 'ABA', 'ABB', 'ABC', where the capital letters
# represent arbirary suits. One way to approach this is to specify a canonical
# ordering of suits: that the left-most suit 'A' must be a club, 'B' must be a
# diamond, and 'C' must be a heart. This _almost_ solves the problem elegantly;
# two remaining edge cases are dealt with below.
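#
# For example (mirroring the doctest on get_canonical below): the flop
# "Ks 2c 3s" sorts to (2c, 3s, Ks); the first suit seen ('c') stays 'c' and the
# second distinct suit ('s') becomes 'd', giving the canonical flop (2c, 3d, Kd).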
def get_canonical(flop):
"""
Returns the canonical version of the given flop.
Canonical flops are sorted. The first suit is 'c' and, if applicable,
the second is 'd' and the third is 'h'.
Args:
flop (tuple): three pokertools.Card objects
    Returns:
A tuple of three pokertools.Card objects which represent
the canonical version of the given flop.
>>> flop = (CARDS['Ks'], CARDS['2c'], CARDS['3s'])
>>> get_canonical(flop)
(<Card: 2c>, <Card: 3d>, <Card: Kd>)
"""
card1, card2, card3 = sorted(flop)
A, B, C = "cdh"
if card1.suit == card2.suit == card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + A],
)
elif card1.suit == card2.suit != card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + B],
)
elif card1.suit == card3.suit != card2.suit:
# Special case: if the 2nd and 3rd cards are a pair e.g. the flop is
# [Jc, Qd, Qc], then our suit changes have resulted in an
# unsorted flop! The correct canonical form is [Jc, Qc, Qd].
return tuple(sorted([
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + A],
]))
elif card1.suit != card2.suit == card3.suit:
# Special case: if the 1st and 2nd cards are a pair e.g. flop is
# [2c, 2d, 8d], that is isomorphic with those cards being switched
# e.g. [2d, 2c, 8d] -- which forms the suit pattern already
# covered above: 'ABA'. Thus, it can be transformed to [2c, 2d, 8c].
# This version has higher priority lexicographically -- it has more
# clubs! To make this change we can simply change the suit of the
# third card to 'c'.
if card1.rank == card2.rank:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + A],
)
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + B],
)
elif card1.suit != card2.suit != card3.suit:
return (
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + C],
)
def get_all_canonicals():
"""
Returns the set of all canonical flops. Each flop is a tuple of three
pokertools.Card objects.
"""
all_possible_flops = combinations(CARDS.values(), r=3)
return set(tuple(get_canonical(flop)) for flop in all_possible_flops)
#------------------------------------------------------------------------------
# Suit-Isomorphs
def get_suit_isomorphs(flop):
"""
Returns a list of all suit-isomorphic combinations of the flop. Each
flop is a tuple of three pokertools.Card objects.
>>> flop = (CARDS['As'], CARDS['4s'], CARDS['Ts'])
>>> for iso in get_suit_isomorphs(flop):
... print(iso)
(<Card: Ac>, <Card: 4c>, <Card: Tc>)
(<Card: Ad>, <Card: 4d>, <Card: Td>)
(<Card: Ah>, <Card: 4h>, <Card: Th>)
(<Card: As>, <Card: 4s>, <Card: Ts>)
>>> flop = (CARDS['Kd'], CARDS['Qh'], CARDS['8c'])
>>> len(get_suit_isomorphs(flop))
24
"""
card1, card2, card3 = flop
if card1.suit == card2.suit == card3.suit:
# For each suit, produce the suit pattern 'AAA'
return [
(
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + A],
)
for A in SUITS
]
elif card1.suit == card2.suit != card3.suit:
# For each combination of two non-identical
# suits, produce the suit pattern 'AAB'
return [
(
CARDS[card1.rank + A],
CARDS[card2.rank + A],
CARDS[card3.rank + B],
)
for A in SUITS for B in SUITS if A != B
]
elif card1.suit != card2.suit == card3.suit:
# For each combination of two non-identical
# suits, produce the suit pattern 'ABB'
return [
(
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + B],
)
for A in SUITS for B in SUITS if A != B
]
elif card1.suit == card3.suit != card2.suit:
# For each combination of two non-identical
# suits, produce the suit pattern 'ABA'
return [
(
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + A],
)
for A in SUITS for B in SUITS if A != B
]
elif card1.suit != card2.suit != card3.suit:
# For each combination of three non-identical
# suits, produce the suit pattern 'ABC'
return [
(
CARDS[card1.rank + A],
CARDS[card2.rank + B],
CARDS[card3.rank + C],
)
for A in SUITS for B in SUITS for C in SUITS
if (A != B and B != C and A != C)
]
#------------------------------------------------------------------------------
# Translation Dict
def get_translation_dict(flop):
"""
Returns a dict which maps suits to other suits. The keys represent the
suits on the given flop. The values represent the suits on the canonical
version. This tell us what the 'translation' is between them, allowing us
to translate the suits of our holecards.
>>> flop = (CARDS['6h'], CARDS['2d'], CARDS['Qd'])
>>> get_canonical(flop)
(<Card: 2c>, <Card: 6d>, <Card: Qc>)
>>> get_translation_dict(flop) == {'c': 'h', 'd': 'c', 'h': 'd', 's': 's'}
True
"""
flop = sorted(flop)
suit1, suit2, suit3 = [card.suit for card in flop]
canonical_flop = get_canonical(flop)
canon1, canon2, canon3 = [card.suit for card in canonical_flop]
# if the flop matches the canonical version, no translation necessary
if (suit1, suit2, suit3) == (canon1, canon2, canon3):
return {"c": "c", "d": "d", "h": "h", "s": "s"}
unused = {"h", "d", "c", "s"} - {suit1, suit2, suit3}
canonical_unused = {"h", "d", "c", "s"} - {canon1, canon2, canon3}
both_unused = unused & canonical_unused
# listed for indexing the elements, sorted for deterministic output
unused = sorted(list(unused))
canonical_unused = sorted(list(canonical_unused))
both_unused = sorted(list(both_unused))
if suit1 == suit2 == suit3:
# suit pattern is 'AAA'
return {
suit1: canon1, # The first flop suit and the
canon1: suit1, # first canon suit must switch.
both_unused[0]: both_unused[0], # The remaining two suits
both_unused[1]: both_unused[1], # don't matter
}
elif suit1 == suit2 != suit3:
# suit pattern is 'AAB'
return {
suit1: canon1, # suit of 1st card = 1st canon
suit3: canon3, # suit of 3rd card = 3rd canon
unused[0]: canonical_unused[0], # Must be the remaining two
unused[1]: canonical_unused[1], # suits of each set
}
elif suit1 != suit2 == suit3:
# suit pattern is 'ABB'
return {
suit1: canon1, # suit of 1st card = 1st canon
suit2: canon2, # suit of 2nd card = 2nd canon
unused[0]: canonical_unused[0], # Must be the remaining two
unused[1]: canonical_unused[1], # suits of each set
}
# Note the order of cards
elif suit1 == suit3 != suit2:
# suit pattern is 'ABA'
return {
suit1: canon1, # suit of 1st card = 1st canon
suit2: canon2, # suit of 2nd card = 2nd canon
unused[0]: canonical_unused[0], # Must be the remaining two
unused[1]: canonical_unused[1], # suits of each set
}
elif suit1 != suit2 != suit3:
# suit pattern is 'ABC'
return {
suit1: canon1, # suit of 1st card = 1st canon
suit2: canon2, # suit of 2nd card = 2nd canon
suit3: canon3, # suit of 3rd card = 3rd canon
unused[0]: canonical_unused[0], # The remaining suits.
}
|
mjwestcott/PyPokertools
|
examples/isomorph.py
|
Python
|
mit
| 10,374
|
# Generated by Django 3.1.2 on 2020-10-21 02:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('files', '0004_image_compressed'),
('inventory', '0007_auto_20201021_0154'),
]
operations = [
migrations.AlterField(
model_name='item',
name='thumbnail',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='files.image'),
),
]
|
hackerspace-ntnu/website
|
inventory/migrations/0008_auto_20201021_0211.py
|
Python
|
mit
| 527
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(morganfainberg): Remove this file and extension in the "O" release as
# it is only used in support of the PKI/PKIz token providers.
import functools
import webob
from keystone.common import controller
from keystone.common import dependency
from keystone.common import extension
from keystone.common import json_home
from keystone.common import wsgi
import keystone.conf
from keystone import exception
CONF = keystone.conf.CONF
EXTENSION_DATA = {
'name': 'OpenStack Simple Certificate API',
'namespace': 'http://docs.openstack.org/identity/api/ext/'
'OS-SIMPLE-CERT/v1.0',
'alias': 'OS-SIMPLE-CERT',
'updated': '2014-01-20T12:00:0-00:00',
'description': 'OpenStack simple certificate retrieval extension',
'links': [
{
'rel': 'describedby',
'type': 'text/html',
'href': 'http://developer.openstack.org/'
'api-ref-identity-v2-ext.html',
}
]}
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
build_resource_relation = functools.partial(
json_home.build_v3_extension_resource_relation,
extension_name='OS-SIMPLE-CERT', extension_version='1.0')
class Routers(wsgi.RoutersBase):
def _construct_url(self, suffix):
return "/OS-SIMPLE-CERT/%s" % suffix
def append_v3_routers(self, mapper, routers):
controller = SimpleCert()
self._add_resource(
mapper, controller,
path=self._construct_url('ca'),
get_action='get_ca_certificate',
rel=build_resource_relation(resource_name='ca_certificate'))
self._add_resource(
mapper, controller,
path=self._construct_url('certificates'),
get_action='list_certificates',
rel=build_resource_relation(resource_name='certificates'))
@dependency.requires('token_provider_api')
class SimpleCert(controller.V3Controller):
def _get_certificate(self, name):
try:
with open(name, 'r') as f:
body = f.read()
except IOError:
raise exception.CertificateFilesUnavailable()
# NOTE(jamielennox): We construct the webob Response ourselves here so
# that we don't pass through the JSON encoding process.
headers = [('Content-Type', 'application/x-pem-file')]
return webob.Response(body=body, headerlist=headers, status="200 OK")
def get_ca_certificate(self, context):
return self._get_certificate(CONF.signing.ca_certs)
def list_certificates(self, context):
return self._get_certificate(CONF.signing.certfile)
|
cernops/keystone
|
keystone/token/_simple_cert.py
|
Python
|
apache-2.0
| 3,268
|
"""
Support for binary sensors using Tellstick Net.
This platform uses the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.tellduslive/
"""
import logging
from homeassistant.components import binary_sensor, tellduslive
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.tellduslive.entry import TelldusLiveEntity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Old way of setting up TelldusLive.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tellduslive sensors dynamically."""
async def async_discover_binary_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[tellduslive.DOMAIN]
async_add_entities([TelldusLiveSensor(client, device_id)])
async_dispatcher_connect(
hass,
tellduslive.TELLDUS_DISCOVERY_NEW.format(binary_sensor.DOMAIN,
tellduslive.DOMAIN),
async_discover_binary_sensor)
class TelldusLiveSensor(TelldusLiveEntity, BinarySensorDevice):
"""Representation of a Tellstick sensor."""
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.is_on
|
PetePriority/home-assistant
|
homeassistant/components/tellduslive/binary_sensor.py
|
Python
|
apache-2.0
| 1,646
|
"""
we use this to mark the active ccx, for use by ccx middleware and some views
"""
ACTIVE_CCX_KEY = '_ccx_id'
|
dkarakats/edx-platform
|
lms/djangoapps/ccx/__init__.py
|
Python
|
agpl-3.0
| 112
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class GameInformerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>.+)\.aspx'
_TEST = {
'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx',
'md5': '292f26da1ab4beb4c9099f1304d2b071',
'info_dict': {
'id': '4515472681001',
'ext': 'mp4',
'title': 'Replay - Animal Crossing',
'description': 'md5:2e211891b215c85d061adc7a4dd2d930',
'timestamp': 1443457610,
'upload_date': '20150928',
'uploader_id': '694940074001',
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/694940074001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(
url, display_id, headers=self.geo_verification_headers())
brightcove_id = self._search_regex(
[r'<[^>]+\bid=["\']bc_(\d+)', r"getVideo\('[^']+video_id=(\d+)"],
webpage, 'brightcove id')
return self.url_result(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew',
brightcove_id)
|
epitron/youtube-dl
|
youtube_dl/extractor/gameinformer.py
|
Python
|
unlicense
| 1,315
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for :func:`iris.analysis.cartography._xy_range`"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import iris.tests.stock as stock
import numpy as np
from iris.analysis.cartography import _xy_range
class Test(tests.IrisTest):
def test_bounds_mismatch(self):
cube = stock.realistic_3d()
cube.coord("grid_longitude").guess_bounds()
with self.assertRaisesRegex(ValueError, "bounds"):
_ = _xy_range(cube)
def test_non_circular(self):
cube = stock.realistic_3d()
assert not cube.coord("grid_longitude").circular
result_non_circ = _xy_range(cube)
self.assertEqual(result_non_circ, ((-5.0, 5.0), (-4.0, 4.0)))
@tests.skip_data
def test_geog_cs_circular(self):
cube = stock.global_pp()
assert cube.coord("longitude").circular
result = _xy_range(cube)
np.testing.assert_array_almost_equal(
result, ((0, 360), (-90, 90)), decimal=0
)
@tests.skip_data
def test_geog_cs_regional(self):
cube = stock.global_pp()
cube = cube[10:20, 20:30]
assert not cube.coord("longitude").circular
result = _xy_range(cube)
np.testing.assert_array_almost_equal(
result, ((75, 108.75), (42.5, 65)), decimal=0
)
if __name__ == "__main__":
tests.main()
|
pp-mo/iris
|
lib/iris/tests/unit/analysis/cartography/test__xy_range.py
|
Python
|
lgpl-3.0
| 1,628
|
from django.shortcuts import render
from django.views import View
class SiteUpdateNotifier(View):
def get(self, request):
pass
|
k00n/site_update_notifier
|
siteUpdateNotifier/sun/views.py
|
Python
|
mit
| 141
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 31 19:59:03 2016
@author: Stella Psomadaki
"""
#run for mini
import os
import time
from tabulate import tabulate
from pointcloud.AbstractQuerier import Querier
import pointcloud.oracleTools as ora
###########################
### Setup Variables ###
###########################
dataset = 'zandmotor'
integrations = ['lxyt', 'lxyzt']
merges = [10, 100, 1000, 10000]
scaling = '1'
repeat = 3
queriyIds = [1, 2, 3, 4, 6, 7, 8, 9, 11, 12]
###########################
fh = open('non-int_glueing_{0}.txt'.format(time.strftime("%d%m%Y")), 'a')
fh.write('Test executed on \n')
fh.write(time.strftime("%d/%m/%Y"))
fh.write('\n')
fh.write(
"""CASE: Non-integrated approach (loose) with scale of 1
Test to identify the effect of using different degrees of merging.
Different approaches:
* z as an attribute and as part of morton code
The queries are repeated 3 times
--START--\n\n\n""")
hquery = ["id", 'maxRanges', "prep.", 'insert', 'ranges', 'Levels', 'fetching', "decoding", 'storing', "Appr.pts", "Fin.pts", "FinFilt", "time", 'extra%', 'total']
path = os.getcwd()
for integr in integrations:
queries = []
configuration = path + '/ini/' + dataset + '/' + integr + '_' + scaling + '_0_False_part1.ini'
querier = Querier(configuration)
querier.numProcesses = 0
connection = querier.getConnection()
cursor = connection.cursor()
cursor.execute('SELECT table_name FROM all_tables WHERE table_name = :1',[querier.queriesTable.upper(),])
length = len(cursor.fetchall())
if not length:
os.system('python -m pointcloud.queryTab {0}'.format(configuration))
if integr == 'lxyt':
# run two first with -2
levels = [19, 19, 20, 19, 20, 22, 22, 19, 20, 19]
elif integr == 'lxyzt':
levels = [14, 14, 16, 15, 16, 17, 17, 15, 16, 15]
for num in range(len(queriyIds)):
fh.write('\n\n')
querier.numLevels = levels[num]
for merge in merges:
querier.maxRanges = merge
for j in range(repeat):
start = time.time()
lst = querier.query(str(queriyIds[num]))
lst.append(round(time.time() - start, 2))
lst.append(round((float(lst[7]) - float(lst[8]))/float(lst[8])*100,2))
lst.append(round(lst[1] + lst[4] + lst[5] + lst[6] + lst[9],2))
lst.insert(0, merge)
lst.insert(0, queriyIds[num])
queries.append(lst)
ora.dropTable(cursor, querier.queryTable + '_' + str(queriyIds[num]))
print tabulate([lst], hquery, tablefmt="plain")
fh.write(tabulate([lst], tablefmt="plain"))
fh.write('\n')
fh.write('\n\n')
print integr + '\n\n'
print tabulate(queries, hquery, tablefmt="plain")
fh.write(integr + '\n\n')
fh.write(tabulate(queries, hquery, tablefmt="plain"))
fh.write('\n\n\n\n')
|
stpsomad/DynamicPCDMS
|
pointcloud/run/zand/loose_ranges_glueing.py
|
Python
|
isc
| 3,080
|
import unittest
from pymacaron_core.swagger.api import API
from pymacaron_core.models import get_model
from pymacaron_core.models import PyMacaronModel
#
# Swagger spec
#
yaml_str = """
swagger: '2.0'
info:
version: '0.0.1'
host: some.server.com
schemes:
- http
produces:
- application/json
definitions:
Foo:
type: object
properties:
s:
type: string
i:
type: integer
o:
$ref: '#/definitions/Bar'
lst:
type: array
items:
type: string
lo:
type: array
items:
$ref: '#/definitions/Bar'
Bar:
type: object
properties:
s:
type: string
o:
$ref: '#/definitions/Baz'
Baz:
type: object
properties:
s:
type: string
"""
#
# Tests
#
class Tests(unittest.TestCase):
def setUp(self):
API('somename', yaml_str=yaml_str)
def test__setattr__getattr(self):
o = get_model('Foo')()
# set/get a bravado attribute
self.assertEqual(o.s, None)
self.assertEqual(getattr(o, 's'), None)
o.s = 'bob'
self.assertEqual(o.s, 'bob')
self.assertEqual(getattr(o, 's'), 'bob')
# Make sure it's really the Bravado instance's attribute that was updated
self.assertTrue('s' not in dir(o))
self.assertEqual(getattr(o, '__bravado_instance').s, 'bob')
o.s = None
self.assertEqual(o.s, None)
self.assertEqual(getattr(o, 's'), None)
setattr(o, 's', 'bob')
self.assertTrue('s' not in dir(o))
self.assertEqual(getattr(o, '__bravado_instance').s, 'bob')
self.assertEqual(o.s, 'bob')
self.assertEqual(getattr(o, 's'), 'bob')
setattr(o, 's', None)
self.assertTrue('s' not in dir(o))
self.assertEqual(getattr(o, '__bravado_instance').s, None)
self.assertEqual(o.s, None)
self.assertEqual(getattr(o, 's'), None)
# set/get a local attribute
with self.assertRaises(Exception) as context:
o.local
self.assertTrue("Model 'Foo' has no attribute local" in str(context.exception))
with self.assertRaises(Exception) as context:
getattr(o, 'local')
self.assertTrue("Model 'Foo' has no attribute local" in str(context.exception))
o.local = 'bob'
self.assertTrue('local' in dir(o))
self.assertEqual(o.local, 'bob')
self.assertEqual(getattr(o, 'local'), 'bob')
o.local = None
self.assertEqual(o.local, None)
self.assertEqual(getattr(o, 'local'), None)
setattr(o, 'local', 'bob')
self.assertEqual(o.local, 'bob')
self.assertEqual(getattr(o, 'local'), 'bob')
setattr(o, 'local', None)
self.assertEqual(o.local, None)
self.assertEqual(getattr(o, 'local'), None)
def test__hasattr(self):
o = get_model('Foo')()
self.assertTrue(hasattr(o, 's'))
self.assertFalse(hasattr(o, 'local'))
o.local = None
self.assertTrue(hasattr(o, 'local'))
def test__delattr(self):
o = get_model('Foo')()
o.s = 'bob'
self.assertEqual(o.s, 'bob')
del o.s
self.assertTrue(hasattr(o, 's'))
self.assertEqual(o.s, None)
o.s = 'bob'
self.assertEqual(o.s, 'bob')
delattr(o, 's')
self.assertTrue(hasattr(o, 's'))
self.assertEqual(o.s, None)
o.local = 'bob'
self.assertEqual(o.local, 'bob')
del o.local
self.assertFalse(hasattr(o, 'local'))
with self.assertRaises(Exception) as context:
o.local
self.assertTrue("Model 'Foo' has no attribute local" in str(context.exception))
o.local = 'bob'
self.assertEqual(o.local, 'bob')
delattr(o, 'local')
self.assertFalse(hasattr(o, 'local'))
with self.assertRaises(Exception) as context:
o.local
self.assertTrue("Model 'Foo' has no attribute local" in str(context.exception))
def test__getitem__setitem__delitem(self):
o = get_model('Foo')()
self.assertEqual(o.s, None)
self.assertEqual(o['s'], None)
o['s'] = 'bob'
self.assertEqual(o.s, 'bob')
self.assertEqual(o['s'], 'bob')
o['s'] = None
self.assertEqual(o.s, None)
self.assertEqual(o['s'], None)
o['s'] = 'bob'
self.assertEqual(o.s, 'bob')
del o['s']
self.assertEqual(o.s, None)
# But local attributes may not be set this way
with self.assertRaises(Exception) as context:
o['local']
self.assertTrue("Model 'Foo' has no attribute local" in str(context.exception))
with self.assertRaises(Exception) as context:
o['local'] = 123
self.assertTrue("Model 'Foo' has no attribute local" in str(context.exception))
with self.assertRaises(Exception) as context:
del o['local']
self.assertTrue("Model 'Foo' has no attribute local" in str(context.exception))
def test__eq(self):
Foo = get_model('Foo')
Bar = get_model('Bar')
a = Foo(s='abc', i=12, o=Bar(s='def'))
b = Foo(s='abc', i=12, o=Bar(s='def'))
self.assertEqual(a, b)
self.assertNotEqual(a, 'bob')
# Adding local parameters does not affect eq
a.local = 'whatever'
self.assertEqual(a, b)
# Changing bravado values makes them different
a.o.s = '123'
self.assertNotEqual(a, b)
def test__to_json__from_json(self):
Foo = get_model('Foo')
Bar = get_model('Bar')
Baz = get_model('Baz')
a = Foo(
s='abc',
i=12,
lst=['a', 'b', 'c'],
o=Bar(
s='1',
o=Baz(
s='2'
)
),
lo=[
Baz(s='r'),
Baz(s='t'),
Baz(s='u'),
Baz(),
]
)
self.assertTrue(isinstance(a, PyMacaronModel))
j = a.to_json()
self.assertEqual(
j,
{
's': 'abc',
'i': 12,
'lst': ['a', 'b', 'c'],
'o': {'o': {'s': '2'}, 's': '1'},
'lo': [{'s': 'r'}, {'s': 't'}, {'s': 'u'}, {}],
}
)
o = Foo.from_json(j)
self.assertTrue(isinstance(o, PyMacaronModel))
# TODO: o now has multiple attributes set to None, while a lacks them,
# and bravado's __eq__ does not see None and absence as equal...
# self.assertEqual(o, a)
jj = o.to_json()
self.assertEqual(jj, j)
def test__update_from_dict(self):
foo = get_model('Foo')()
foo.update_from_dict({'s': 'bob'})
self.assertEqual(
foo.to_json(),
{'s': 'bob'},
)
foo.update_from_dict({'s': 'abc', 'i': 12})
self.assertEqual(
foo.to_json(),
{'s': 'abc', 'i': 12},
)
foo.update_from_dict({})
self.assertEqual(
foo.to_json(),
{'s': 'abc', 'i': 12},
)
foo.update_from_dict({'i': None})
self.assertEqual(
foo.to_json(),
{'s': 'abc'},
)
foo.update_from_dict({'s': None, 'i': 32}, ignore_none=True)
self.assertEqual(
foo.to_json(),
{'s': 'abc', 'i': 32},
)
foo.update_from_dict({'s': None})
self.assertEqual(
foo.to_json(),
{'i': 32},
)
|
erwan-lemonnier/klue-client-server
|
test/test_model.py
|
Python
|
bsd-3-clause
| 7,677
|
from rgp import rgp
class rgp_hm(rgp):
pass
|
hiqdev/reppy
|
heppy/modules/rgp_hm.py
|
Python
|
bsd-3-clause
| 49
|
import numpy as np
from scipy.optimize import differential_evolution as DE
from scipy.special import exp1
try:
# If this import is not done outside main(), then eval() fails in the
# definition of the moves
from emcee import moves
except ImportError:
pass
import warnings
from ..best_fit.bf_common import modeKDE
from .. import update_progress
def main(
clp, plx_bayes_flag, plx_offset, plx_chains, plx_runs, plx_burn,
plx_emcee_moves, flag_make_plot, outlr_std=3., **kwargs):
"""
Bayesian parallax distance using the Bailer-Jones (2015) model with the
'shape parameter' marginalized.
Hardcoded choices:
* outlr_std sigma outliers are rejected
* Bayesian prior is a Gaussian with a fixed standard deviation
"""
plx_clrg, mmag_clp, plx_clp, e_plx_clp, plx_samples,\
plx_tau_autocorr, mean_afs = [[] for _ in range(7)]
plx_flag_clp, plx_bayes_flag_clp, plx_wa, plx_Bayes_kde, plx_Bys,\
plx_ess = False, False, np.nan, np.array([]), np.array([]), np.nan
if ('C2' in flag_make_plot) or plx_bayes_flag:
# Extract parallax data.
plx = np.array(list(zip(*list(zip(*clp['cl_reg_fit']))[7]))[0])
# Array with no nan values
plx_clrg = plx[~np.isnan(plx)]
plx_flag_clp = checkPlx(plx_clrg)
if plx_flag_clp:
print("Processing parallaxes")
# Reject outlr_std*\sigma outliers.
max_plx = np.nanmedian(plx) + outlr_std * np.nanstd(plx)
min_plx = np.nanmedian(plx) - outlr_std * np.nanstd(plx)
            # Suppress the RuntimeWarning issued when 'plx' contains 'nan'
# values.
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore')
plx_2s_msk = (plx < max_plx) & (plx > min_plx)
# Prepare masked data.
mmag_clp = np.array(
list(zip(*list(zip(
*clp['cl_reg_fit']))[3]))[0])[plx_2s_msk]
plx_clp = plx[plx_2s_msk]
e_plx_clp = np.array(
list(zip(*list(zip(
*clp['cl_reg_fit']))[8]))[0])[plx_2s_msk]
# Take care of possible zero values that can produce issues
# since errors are in the denominator.
e_plx_clp[e_plx_clp == 0.] = 10.
# Weighted average.
# Source: https://physics.stackexchange.com/a/329412/8514
plx_w = 1. / np.square(e_plx_clp)
plx_w = plx_w if plx_w.sum() > 0. else None
plx_wa = np.average(plx_clp, weights=plx_w)
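            # i.e. inverse-variance weights: w_i = 1 / e_i**2 and
            # plx_wa = sum(w_i * plx_i) / sum(w_i); np.average falls back to the
            # plain mean when the weights sum to zero and plx_w is set to None.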
if plx_bayes_flag:
plx_samples, plx_Bayes_kde, plx_Bys, plx_bayes_flag_clp,\
plx_tau_autocorr, mean_afs, plx_ess = plxBayes(
plx_offset, plx_chains, plx_runs, plx_burn,
plx_emcee_moves, plx_clp, e_plx_clp)
else:
print(" WARNING: no valid Plx data found")
clp.update({
'plx_flag_clp': plx_flag_clp, 'plx_clrg': plx_clrg,
'mmag_clp': mmag_clp, 'plx_clp': plx_clp,
'e_plx_clp': e_plx_clp, 'plx_Bys': plx_Bys, 'plx_wa': plx_wa,
'plx_bayes_flag_clp': plx_bayes_flag_clp, 'plx_samples': plx_samples,
'plx_Bayes_kde': plx_Bayes_kde, 'plx_tau_autocorr': plx_tau_autocorr,
'mean_afs': mean_afs, 'plx_ess': plx_ess})
return clp
def checkPlx(plx_clrg):
"""
Check that a range of parallaxes is possible.
"""
if plx_clrg.any() and np.min(plx_clrg) < np.max(plx_clrg):
return True
else:
return False
def plxBayes(
plx_offset, plx_chains, plx_runs, plx_burn, plx_emcee_moves,
plx_clp, e_plx_clp, N_conv=1000, tau_stable=0.05):
"""
HARDCODED
N_conv
tau_stable
"""
from emcee import ensemble
# Move used by emcee
mv = [(eval("(moves." + _ + ")")) for _ in plx_emcee_moves]
plx_bayes_flag_clp = True
# Add offset to parallax data.
plx_clp += plx_offset
# Sampler parameters.
ndim, nwalkers, nruns = 1, plx_chains, plx_runs
print(" Bayesian Plx model ({} runs)".format(nruns))
# DE initial mu position
# mu_p = DE_mu_sol(plx_clp, e_plx_clp)
mu_p = np.mean(plx_clp)
# Define the 'r_i' values used to evaluate the integral.
int_max = mu_p + 5.
x, B2 = r_iVals(int_max, plx_clp, e_plx_clp)
# emcee sampler
sampler = ensemble.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, B2, mu_p), moves=mv)
# Ball of initial guesses around 'mu_p'
# pos0 = np.clip(
# np.array([[mu_p + .05 * np.random.normal()] for i in range(nwalkers)]),
# a_min=0., a_max=None)
# Random initial guesses
pos0 = np.random.uniform(0., 2. * np.mean(plx_clp), (nwalkers, 1))
tau_index, autocorr_vals, afs = 0, np.empty(nruns), np.empty(nruns)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# HARDCODED
old_tau = np.inf
for i, _ in enumerate(sampler.sample(pos0, iterations=nruns)):
# Only check convergence every X steps
if i % 10 and i < (nruns - 1):
continue
afs[tau_index] = np.mean(sampler.acceptance_fraction)
tau = sampler.get_autocorr_time(tol=0)
autocorr_vals[tau_index] = np.mean(tau)
tau_index += 1
# Check convergence
converged = tau * (N_conv / nwalkers) < i * plx_burn
converged &= np.all(np.abs(old_tau - tau) / tau < tau_stable)
if converged:
print("")
break
old_tau = tau
update_progress.updt(nruns, i + 1)
mean_afs = afs[:tau_index]
tau_autocorr = autocorr_vals[:tau_index]
nburn = int(i * plx_burn)
samples = sampler.get_chain(discard=nburn, flat=True)
# Mode and KDE to plot
# This simulates the 'fundam_params and 'varIdxs' arrays.
fp, vi = [[-np.inf, np.inf], [-np.inf, np.inf]], [0, 1]
plx_Bys_mode, plx_Bayes_kde = modeKDE(fp, vi, 1. / samples.T)
plx_Bys_mode, plx_Bayes_kde = 1. / plx_Bys_mode[0], plx_Bayes_kde[0]
# 16th, median, 84th, mean, mode in Kpc
p16, p50, p84 = np.percentile(samples, (16, 50, 84))
plx_Bys = np.array([p16, p50, p84, np.mean(samples), plx_Bys_mode])
tau = sampler.get_autocorr_time(tol=0)[0]
plx_ess = samples.size / tau
# For plotting, (nsteps, nchains, ndim)
plx_samples = sampler.get_chain()[:, :, 0]
print("Bayesian plx estimated: "
+ "{:.3f} (ESS={:.0f}, tau={:.0f})".format(
1. / plx_Bys[3], plx_ess, tau))
except Exception as e:
print(e)
print("\n ERROR: could not process Plx data with emcee")
plx_samples, plx_Bayes_kde, plx_Bys, plx_bayes_flag_clp, plx_ess,\
tau_autocorr, mean_afs = [], np.array([]), np.array([]), False,\
np.nan, np.nan, np.nan
return plx_samples, plx_Bayes_kde, plx_Bys, plx_bayes_flag_clp,\
tau_autocorr, mean_afs, plx_ess
def DE_mu_sol(plx_clp, e_plx_clp, int_max=20., psize=20, maxi=100):
"""
Use the Differential Evolution algorithm to approximate the best solution
used as the mean of the prior.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Define the 'r_i' values used to evaluate the integral.
x, B2 = r_iVals(int_max, plx_clp, e_plx_clp)
# Use DE to estimate the ML
def DEdist(model):
return -lnlike(model, x, B2)
bounds = [[0., 20.]]
result = DE(DEdist, bounds, popsize=psize, maxiter=maxi)
return result.x[0]
def r_iVals(int_max, plx, e_plx):
"""
The 'r_i' values used to evaluate the integral.
"""
N = int(int_max / 0.01)
x = np.linspace(.1, int_max, N).reshape(-1, 1)
B1 = ((plx - (1. / x)) / e_plx)**2
B2 = (np.exp(-.5 * B1) / e_plx)
return x, B2
def lnprob(mu, x, B2, mu_p):
lp = lnprior(mu, mu_p)
if np.isinf(lp):
return -np.inf
return lp + lnlike(mu, x, B2)
def lnprior(mu, mu_p, std_p=1.):
"""
Log prior.
"""
if mu < 0.:
return -np.inf
# Gaussian > 0
return -0.5 * ((mu - mu_p) / std_p)**2
# Exponential prior proposed by Bailer-Jones.
# return (.5 / (.5 * mu_p)**3) * mu**2 * np.exp(-mu / (.5 * mu_p))
# # Uniform prior
# return 0.
def lnlike(mu, x, B2):
"""
Model defined in Bailer-Jones (2015), Eq (20), The shape parameter s_c
is marginalized.
"""
# Marginalization of the scale parameter 's_c'. We integrate over it
# using the incomplete gamma function as per Wolfram:
#
# https://www.wolframalpha.com/input/
# ?i=integral+exp(-.5*(a%5E2%2Fx%5E2))+%2F+x,+x%3D0+to+x%3Db
#
# This function is equivalent to scipy's 'exp1()', as stated in:
#
# https://stackoverflow.com/a/53148269/1391441
#
# so we use this function to marginalize the 's_c' parameter up to a
# 5 kpc limit.
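    # (Sketch of that substitution, for reference: with u = 0.5*((r_i - mu)/s)**2
    # the integral over the scale s from 0 to lim_u of exp(-0.5*((r_i - mu)/s)**2)/s
    # becomes 0.5 * integral of exp(-u)/u du from 0.5*((r_i - mu)/lim_u)**2 to
    # infinity, i.e. the 0.5 * exp1(...) term used in distFunc below.)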
lim_u = 5.
def distFunc(r_i):
"""
Eq (20) of Bailer-Jones (2015) with everything that can be calculated
outside, moved outside.
"""
sc_int = .5 * exp1(.5 * ((r_i - mu) / lim_u)**2)
sc_int.T[np.isinf(sc_int.T)] = 0.
return B2 * sc_int
# Double integral
int_exp = np.trapz(distFunc(x), x, axis=0)
# Mask 'bad' values
msk = np.logical_or(
int_exp <= 0., int_exp >= np.inf, int_exp <= -np.inf)
int_exp[msk] = np.nan
return np.nansum(np.log(int_exp))
|
asteca/ASteCA
|
packages/data_analysis/plx_analysis.py
|
Python
|
gpl-3.0
| 9,707
|
from sqlalchemy.sql import select
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, Column, Integer, String, Float, Date, MetaData
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
import sqlite3
from xlrd import open_workbook
import re
import click
from datetime import datetime, date
import pandas as pd
db = "sqlite:///ysdd.db"
project_excel = "d:\\projects\\project.xlsx"
perf_excel = "d:\\projects\\ysdd.xlsm"
phase_excel = "d:\\projects\\phase.xlsx"
Session = sessionmaker()
class Team(Base):
__tablename__ = 'Team'
name = Column(String, primary_key=True)
description = Column(String)
def __repr__(self):
return "<Team(name='%s', description='%s')>" % (self.name, self.description)
class Project(Base):
__tablename__ = 'Project'
name = Column(String, primary_key=True)
team = Column(String, primary_key=True)
date = Column(String)
orderamount = Column(Float)
riskcapital = Column(Float)
partneramount = Column(Integer)
status = Column(String)
period = Column(Integer)
numberofperiod = Column(Integer)
class Performance(Base):
__tablename__ = 'Performance'
team = Column(String, primary_key=True)
project = Column(String, primary_key=True)
number = Column(Integer, primary_key=True)
reportdate = Column(String)
positionratio = Column(Float)
earningrate = Column(Float)
class Phase(Base):
__tablename__ = 'Phase'
team = Column(String, primary_key=True)
project = Column(String, primary_key=True)
number = Column(Integer, primary_key=True)
settlementdate = Column(String, primary_key=True)
traderratio = Column(Float)
investorratio = Column(Float)
marketvalue = Column(Float)
redemption = Column(String)
class MyList(Base):
__tablename__ = 'list'
name = Column(String, primary_key=True)
type = Column(String)
date = Column(Date, primary_key=True)
marketvalue = Column(Integer)
investorratio = Column(Float)
traderratio = Column(Float)
ratioperday = Column(Float)
def clear(engine):
con = engine.connect()
meta = con.metadata
trans = con.begin()
for table in reversed(meta.sorted_tables):
con.execute(table.delete())
trans.commit()
def parse_name(value):
team = value.split('V', 1)[0]
project = value.split(team, 1)[1]
t = project.split('期', 1)
if len(t) > 1:
project = t[0] + t[1]
else:
project = t[0]
return team, project
def parse_period(value):
result = re.search("\d+\*\d+", value)
p, n = result.group(0).split('*')
return int(p), int(n)
def find_period(value):
result = re.search("\d+\*\d+", value)
p, n = result.group(0).split('*')
return int(p)
def parse_number_of_period(value, n):
return n - len(value.split(';')) + 1
def float_to_date(value):
if type(value) is float:
return str(int(value))
elif type(value) is str:
return value
@click.group()
def main():
click.echo('main')
@main.command()
@click.argument('filename', type=click.Path(exists=True))
def ls(filename):
click.echo('history convert')
engine = create_engine(db)
MyList.metadata.create_all(engine)
Session.configure(bind=engine)
session = Session()
wb = open_workbook(filename)
for sheet in wb.sheets():
year = datetime.strptime(sheet.name, "%Y%m").date().year
for row in range(0, sheet.nrows, 2):
datestr = str(int(sheet.cell_value(row, 0)))
if len(datestr) == 3:
datestr = '0' + datestr
d = datetime.strptime(datestr, "%m%d").date()
reportdate = date(year, d.month, d.day)
name = sheet.cell_value(row, 1)
type = sheet.cell_value(row + 1, 1)
marketvalue = sheet.cell_value(row, 2)
investorratio = sheet.cell_value(row, 3)
traderratio = sheet.cell_value(row, 4)
days = find_period(type)
ratioperday = investorratio / days
session.add(MyList(name=name, type=type, date=reportdate, marketvalue=marketvalue,
investorratio=investorratio, traderratio=traderratio,
ratioperday=ratioperday))
session.commit()
def convert_project_name(name):
parts = name.split('V')
return parts[0]
@main.command()
def stats():
engine = create_engine(db)
perf = pd.read_sql_table('list', engine)
perf['name'] = perf['name'].apply(convert_project_name)
t = perf.groupby(by=['name'])
mean = t['ratioperday'].mean()
median = t['ratioperday'].median()
d = {'mean': mean,
'medain': median}
df = pd.DataFrame(d)
click.echo(df)
df.to_excel('report.xlsx')
if __name__ == "__main__":
main()
# engine = create_engine(db)
# Session.configure(bind=engine)
# session = Session()
# wb = open_workbook(project_excel)
# sheet = wb.sheet_by_index(0)
# names = []
# for row in range(sheet.nrows):
# s = sheet.cell(row, 1).value
# name, project_name = parse_name(s)
# p = Project()
# p.name = project_name
# p.team = name
# p.orderamount = float(sheet.cell(row, 2).value)
# p.riskcapital = float(sheet.cell(row, 3).value)
# p.partneramount = int(sheet.cell(row, 5).value)
# p.status = sheet.cell(row, 6).value
# p.date = str(sheet.cell(row, 0).value)
# if name not in names:
# names.append(name)
# session.add(Team(name=name, description="None"))
# session.add(p)
# session.commit()
# perf_wb = open_workbook(perf_excel)
# sheet = perf_wb.sheet_by_index(0)
# date = sheet.name
# for row in range(sheet.nrows):
# period, number = parse_period(sheet.cell(row, 4).value)
# team, project = parse_name(sheet.cell(row, 0).value)
# position = sheet.cell(row, 2).value
# earning = sheet.cell(row, 3).value
# tp = sheet.cell_value(row, 1)
# tp = tp if type(tp) is str else str(int(tp))
# phase = parse_number_of_period(tp, number)
# session.add(Performance(team=team, project=project, number=phase, reportdate=date, positionratio=position, earningrate=earning))
# session.commit()
# phase_wb = open_workbook(phase_excel)
# sheet = phase_wb.sheet_by_index(0)
# for row in range(0, sheet.nrows, 2):
# settlementdate = float_to_date(sheet.cell_value(row, 0))
# team, project = parse_name(sheet.cell_value(row, 1))
# marketvalue = sheet.cell_value(row, 2)
# investorratio = sheet.cell_value(row, 3)
# traderratio = sheet.cell_value(row, 4)
# redemption = sheet.cell_value(row, 5)
# session.add(Phase(team=team, project=project, number=1, marketvalue=marketvalue, settlementdate=settlementdate, investorratio=investorratio, traderratio=traderratio, redemption=redemption))
# session.commit()
|
maxwell-lv/MyQuant
|
ssd.py
|
Python
|
gpl-3.0
| 7,030
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: sros_command
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Run commands on remote devices running Nokia SR OS
description:
- Sends arbitrary commands to an SR OS node and returns the results
read from the device. This module includes an argument that will
cause the module to wait for a specific condition before returning
or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(sros_config) to configure SR OS devices.
extends_documentation_fragment: sros
options:
commands:
description:
- List of commands to send to the remote SR OS device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
        module does not return until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
sros_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains sros
sros_command:
commands: show version
wait_for: result[0] contains sros
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
sros_command:
commands:
- show version
- show port detail
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
sros_command:
commands:
- show version
- show port detail
wait_for:
- result[0] contains TiMOS-B-14.0.R4
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.netcli import AddCommandError, FailedConditionsError
from ansible.module_utils.sros import NetworkModule, NetworkError
VALID_KEYS = ['command', 'output', 'prompt', 'response']
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def parse_commands(module):
for cmd in module.params['commands']:
if isinstance(cmd, basestring):
cmd = dict(command=cmd, output=None)
elif 'command' not in cmd:
module.fail_json(msg='command keyword argument is required')
elif cmd.get('output') not in [None, 'text']:
module.fail_json(msg='invalid output specified for command')
elif not set(cmd.keys()).issubset(VALID_KEYS):
module.fail_json(msg='unknown keyword specified')
yield cmd
def main():
spec = dict(
# { command: <str>, output: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = NetworkModule(argument_spec=spec,
connect_on_load=False,
supports_check_mode=True)
commands = list(parse_commands(module))
conditionals = module.params['wait_for'] or list()
warnings = list()
runner = CommandRunner(module)
for cmd in commands:
if module.check_mode and not cmd['command'].startswith('show'):
warnings.append('only show commands are supported when using '
'check mode, not executing `%s`' % cmd['command'])
else:
if cmd['command'].startswith('conf'):
module.fail_json(msg='sros_command does not support running '
'config mode commands. Please use '
'sros_config instead')
try:
runner.add_command(**cmd)
except AddCommandError:
exc = get_exception()
warnings.append('duplicate command detected: %s' % cmd)
for item in conditionals:
runner.add_conditional(item)
runner.retries = module.params['retries']
runner.interval = module.params['interval']
runner.match = module.params['match']
try:
runner.run()
except FailedConditionsError:
exc = get_exception()
module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc))
result = dict(changed=False, stdout=list())
for cmd in commands:
try:
output = runner.get_command(cmd['command'])
except ValueError:
output = 'command not executed due to check_mode, see warnings'
result['stdout'].append(output)
result['warnings'] = warnings
result['stdout_lines'] = list(to_lines(result['stdout']))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
sivel/ansible-modules-core
|
network/sros/sros_command.py
|
Python
|
gpl-3.0
| 7,892
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes UFO and/or font files."""
from absl import app
from absl import flags
from absl import logging
from collections import Counter
import csv
import dataclasses
import enum
import math
from fontTools import ttLib
from fontTools.misc.arrayTools import rectArea, normRect, unionRect
from fontTools.misc.roundTools import otRound
from fontTools.ttLib.tables import otTables as ot
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from itertools import chain
from lxml import etree # pytype: disable=import-error
from nanoemoji.bitmap_tables import make_cbdt_table, make_sbix_table
from nanoemoji import codepoints, config, glyphmap
from nanoemoji.colors import Color
from nanoemoji.config import FontConfig
from nanoemoji.color_glyph import ColorGlyph
from nanoemoji.fixed import fixed_safe
from nanoemoji.glyph import glyph_name
from nanoemoji.glyphmap import GlyphMapping
from nanoemoji.glyph_reuse import GlyphReuseCache
from nanoemoji.paint import (
is_gradient,
is_transform,
transformed,
CompositeMode,
Paint,
PaintComposite,
PaintColrGlyph,
PaintGlyph,
PaintSolid,
)
from nanoemoji.png import PNG
from nanoemoji.svg import make_svg_table
from nanoemoji.svg_path import draw_svg_path
from nanoemoji import util
import os
from pathlib import Path
import ufoLib2
from ufo2ft.outlineCompiler import StubGlyph
from picosvg.svg import SVG
from picosvg.svg_transform import Affine2D
from picosvg.svg_types import SVGPath
import regex
import sys
from typing import (
cast,
Any,
Callable,
Generator,
Iterable,
Mapping,
MutableSequence,
NamedTuple,
Optional,
Sequence,
Tuple,
)
from ufoLib2.objects import Component, Glyph
import ufo2ft
FLAGS = flags.FLAGS
flags.DEFINE_string("config_file", None, "Config filename.")
flags.DEFINE_string("glyphmap_file", None, "Glyphmap filename.")
# A GlyphMapping plus an SVG, typically a picosvg, and/or a PNG
class InputGlyph(NamedTuple):
    svg_file: Optional[Path]  # either filename can be omitted, mostly for debugging
bitmap_file: Optional[Path]
codepoints: Tuple[int, ...]
glyph_name: str
svg: Optional[SVG] # None for bitmap formats
bitmap: Optional[PNG] # None for vector formats
# A color font generator.
# apply_ufo(ufo, color_glyphs) is called first, to update a generated UFO
# apply_ttfont(ufo, color_glyphs, ttfont) is called second, to allow fixups after ufo2ft
# Ideally we delete the ttfont step in future. Blocking issues:
# https://github.com/unified-font-object/ufo-spec/issues/104
# If the output file is .ufo then apply_ttfont is not called.
# Where possible code to the ufo and let apply_ttfont be a nop.
class ColorGenerator(NamedTuple):
apply_ufo: Callable[[FontConfig, ufoLib2.Font, Tuple[ColorGlyph, ...]], None]
apply_ttfont: Callable[
[FontConfig, ufoLib2.Font, Tuple[ColorGlyph, ...], ttLib.TTFont], None
]
font_ext: str # extension for font binary, .ttf or .otf
_COLOR_FORMAT_GENERATORS = {
"glyf": ColorGenerator(lambda *args: _glyf_ufo(*args), lambda *_: None, ".ttf"),
"glyf_colr_0": ColorGenerator(
lambda *args: _colr_ufo(0, *args), lambda *_: None, ".ttf"
),
"glyf_colr_1": ColorGenerator(
lambda *args: _colr_ufo(1, *args), lambda *_: None, ".ttf"
),
"cff_colr_0": ColorGenerator(
lambda *args: _colr_ufo(0, *args), lambda *_: None, ".otf"
),
"cff_colr_1": ColorGenerator(
lambda *args: _colr_ufo(1, *args), lambda *_: None, ".otf"
),
"cff2_colr_0": ColorGenerator(
lambda *args: _colr_ufo(0, *args), lambda *_: None, ".otf"
),
"cff2_colr_1": ColorGenerator(
lambda *args: _colr_ufo(1, *args), lambda *_: None, ".otf"
),
"picosvg": ColorGenerator(
lambda *_: None,
lambda *args: _svg_ttfont(*args, picosvg=True, compressed=False),
".ttf",
),
"picosvgz": ColorGenerator(
lambda *_: None,
lambda *args: _svg_ttfont(*args, picosvg=True, compressed=True),
".ttf",
),
"untouchedsvg": ColorGenerator(
lambda *_: None,
lambda *args: _svg_ttfont(*args, picosvg=False, compressed=False),
".ttf",
),
"untouchedsvgz": ColorGenerator(
lambda *_: None,
lambda *args: _svg_ttfont(*args, picosvg=False, compressed=True),
".ttf",
),
"cbdt": ColorGenerator(
lambda *args: None,
lambda *args: _cbdt_ttfont(*args),
".ttf",
),
"sbix": ColorGenerator(
lambda *args: None,
lambda *args: _sbix_ttfont(*args),
".ttf",
),
# https://github.com/googlefonts/nanoemoji/issues/260 svg, colr
# Non-compressed picosvg because woff2 is likely
# Meant to be subset if used for network delivery
"glyf_colr_1_and_picosvg": ColorGenerator(
lambda *args: _colr_ufo(1, *args),
lambda *args: _svg_ttfont(*args, picosvg=True, compressed=False),
".ttf",
),
# https://github.com/googlefonts/nanoemoji/issues/260 svg, colr, cbdt; max compatibility
# Meant to be subset if used for network delivery
# Non-compressed picosvg because woff2 is likely
# cbdt because sbix is less x-platform than you'd guess (https://github.com/harfbuzz/harfbuzz/issues/2679)
"glyf_colr_1_and_picosvg_and_cbdt": ColorGenerator(
lambda *args: _colr_ufo(1, *args),
lambda *args: _picosvg_and_cbdt(*args),
".ttf",
),
}
assert _COLOR_FORMAT_GENERATORS.keys() == set(config._COLOR_FORMATS)
def _ufo(config: FontConfig) -> ufoLib2.Font:
ufo = ufoLib2.Font()
ufo.info.familyName = config.family
# set various font metadata; see the full list of fontinfo attributes at
# https://unifiedfontobject.org/versions/ufo3/fontinfo.plist/#generic-dimension-information
ufo.info.unitsPerEm = config.upem
# we just use a simple scheme that makes all sets of vertical metrics the same;
# if one needs more fine-grained control they can fix up post build
ufo.info.ascender = (
ufo.info.openTypeHheaAscender
) = ufo.info.openTypeOS2TypoAscender = config.ascender
ufo.info.descender = (
ufo.info.openTypeHheaDescender
) = ufo.info.openTypeOS2TypoDescender = config.descender
ufo.info.openTypeHheaLineGap = ufo.info.openTypeOS2TypoLineGap = config.linegap
# set USE_TYPO_METRICS flag (OS/2.fsSelection bit 7) to make sure OS/2 Typo* metrics
# are preferred to define Windows line spacing over legacy WinAscent/WinDescent:
# https://docs.microsoft.com/en-us/typography/opentype/spec/os2#fsselection
ufo.info.openTypeOS2Selection = [7]
# version
ufo.info.versionMajor = config.version_major
ufo.info.versionMinor = config.version_minor
# Must have .notdef and Win 10 Chrome likes a blank gid1 so make gid1 space
ufo.newGlyph(".notdef")
space = ufo.newGlyph(".space")
space.unicodes = [0x0020]
space.width = config.width
ufo.glyphOrder = [".notdef", ".space"]
# Always the .notdef outline, even for things like a pure SVG font
# This decreases the odds of triggering https://github.com/khaledhosny/ots/issues/52
_draw_notdef(config, ufo)
    # use 'post' format 3.0 for TTFs, shaving a few KBs of unneeded glyph names
ufo.lib[ufo2ft.constants.KEEP_GLYPH_NAMES] = config.keep_glyph_names
return ufo
def _make_ttfont(
config: FontConfig, ufo: ufoLib2.Font, color_glyphs: Tuple[ColorGlyph, ...]
):
if config.output_format == ".ufo":
return None
# Use skia-pathops to remove overlaps (i.e. simplify self-overlapping
# paths) because the default ("booleanOperations") does not support
# quadratic bezier curves (qcurve), which may appear
# when we pass through picosvg (e.g. arcs or stroked paths).
ttfont = None
if config.output_format == ".ttf":
ttfont = ufo2ft.compileTTF(ufo, overlapsBackend="pathops")
if config.output_format == ".otf":
cff_version = 1
if config.color_format.startswith("cff2_"):
cff_version = 2
ttfont = ufo2ft.compileOTF(
ufo, cffVersion=cff_version, overlapsBackend="pathops"
)
if not ttfont:
raise ValueError(
f"Unable to generate {config.color_format} {config.output_format}"
)
return ttfont
def _write(ufo, ttfont, output_file):
logging.info("Writing %s", output_file)
if os.path.splitext(output_file)[1] == ".ufo":
ufo.save(output_file, overwrite=True)
else:
ttfont.save(output_file)
def _not_impl(func_name, color_format, *_):
raise NotImplementedError(f"{func_name} for {color_format} not implemented")
def _next_name(ufo: ufoLib2.Font, name_fn) -> str:
i = 0
while name_fn(i) in ufo:
i += 1
return name_fn(i)
def _create_glyph(
color_glyph: ColorGlyph, paint: PaintGlyph, path_in_font_space: str
) -> Glyph:
glyph = _init_glyph(color_glyph)
ufo = color_glyph.ufo
draw_svg_path(SVGPath(d=path_in_font_space), glyph.getPen())
ufo.glyphOrder += [glyph.name]
return glyph
def _migrate_paths_to_ufo_glyphs(
color_glyph: ColorGlyph, glyph_cache: GlyphReuseCache
) -> ColorGlyph:
svg_units_to_font_units = color_glyph.transform_for_font_space()
# Walk through the color glyph, where we see a PaintGlyph take the path out of it,
# move the path into font coordinates, generate a ufo glyph, and push the name of
# the ufo glyph into the PaintGlyph
def _update_paint_glyph(paint):
if paint.format != PaintGlyph.format:
return paint
if glyph_cache.is_known_glyph(paint.glyph):
return paint
assert paint.glyph.startswith("M"), f"{paint.glyph} doesn't look like a path"
path_in_font_space = (
SVGPath(d=paint.glyph).apply_transform(svg_units_to_font_units).d
)
reuse_result = glyph_cache.try_reuse(path_in_font_space)
if reuse_result is not None:
# TODO: when is it more compact to use a new transforming glyph?
child_transform = Affine2D.identity()
child_paint = paint.paint
if is_transform(child_paint):
child_transform = child_paint.gettransform()
child_paint = child_paint.paint
# sanity check: GlyphReuseCache.try_reuse would return None if overflowed
assert fixed_safe(*reuse_result.transform)
overflows = False
# TODO: handle gradient anywhere in subtree, not only as direct child of
# PaintGlyph or PaintTransform
if is_gradient(child_paint):
# We have a gradient so we need to reverse the effect of the
# reuse_result.transform. First we try to apply the combined transform
# to the gradient's geometry; but this may overflow OT integer bounds,
# in which case we pass through gradient unscaled
transform = Affine2D.compose_ltr(
(child_transform, reuse_result.transform.inverse())
)
# skip reuse if combined transform overflows OT int bounds
overflows = not fixed_safe(*transform)
if not overflows:
try:
child_paint = child_paint.apply_transform(transform)
except OverflowError:
child_paint = transformed(transform, child_paint)
if not overflows:
return transformed(
reuse_result.transform,
PaintGlyph(
glyph=reuse_result.glyph_name,
paint=child_paint,
),
)
glyph = _create_glyph(color_glyph, paint, path_in_font_space)
glyph_cache.add_glyph(glyph.name, path_in_font_space)
return dataclasses.replace(paint, glyph=glyph.name)
return color_glyph.mutating_traverse(_update_paint_glyph)
def _draw_glyph_extents(
ufo: ufoLib2.Font, glyph: Glyph, bounds: Tuple[float, float, float, float]
):
# apparently on Mac (but not Linux) Chrome and Firefox end up relying on the
# extents of the base layer to determine where the glyph might paint. If you
# leave the base blank the COLR glyph never renders.
if rectArea(bounds) == 0:
return
start, end = bounds[:2], bounds[2:]
pen = glyph.getPen()
pen.moveTo(start)
pen.lineTo(end)
pen.endPath()
return glyph
def _draw_notdef(config: FontConfig, ufo: ufoLib2.Font):
# A StubGlyph named .notdef provides a nice drawing of a notdef
notdefArtist = StubGlyph(
".notdef",
config.width,
config.upem,
config.ascender,
config.descender,
)
# UFO doesn't like just sticking StubGlyph directly in place
glyph = ufo[".notdef"]
glyph.width = notdefArtist.width
notdefArtist.draw(glyph.getPen())
def _glyf_ufo(
config: FontConfig, ufo: ufoLib2.Font, color_glyphs: Tuple[ColorGlyph, ...]
):
# We want to mutate our view of color_glyphs
color_glyphs = list(color_glyphs)
# glyphs by reuse_key
glyph_cache = GlyphReuseCache(config.reuse_tolerance)
glyph_uses = Counter()
for i, color_glyph in enumerate(color_glyphs):
logging.debug(
"%s %s %s",
ufo.info.familyName,
color_glyph.ufo_glyph_name,
color_glyph.transform_for_font_space(),
)
parent_glyph = color_glyph.ufo_glyph
# generate glyphs for PaintGlyph's and assign glyph names
color_glyphs[i] = color_glyph = _migrate_paths_to_ufo_glyphs(
color_glyph, glyph_cache
)
for root in color_glyph.painted_layers:
for context in root.breadth_first():
# For 'glyf' just dump anything that isn't a PaintGlyph
if not isinstance(context.paint, PaintGlyph):
continue
paint_glyph = cast(PaintGlyph, context.paint)
glyph = ufo.get(paint_glyph.glyph)
parent_glyph.components.append(
Component(baseGlyph=glyph.name, transformation=context.transform)
)
glyph_uses[glyph.name] += 1
# No great reason to keep single-component glyphs around (unless reused)
for color_glyph in color_glyphs:
parent_glyph = color_glyph.ufo_glyph
if (
len(parent_glyph.components) == 1
and glyph_uses[parent_glyph.components[0].baseGlyph] == 1
):
component = ufo[parent_glyph.components[0].baseGlyph]
del ufo[component.name]
component.unicode = parent_glyph.unicode
ufo[color_glyph.ufo_glyph_name] = component
assert component.name == color_glyph.ufo_glyph_name
def _name_prefix(color_glyph: ColorGlyph) -> str:
return f"{color_glyph.ufo_glyph_name}."
def _init_glyph(color_glyph: ColorGlyph) -> Glyph:
ufo = color_glyph.ufo
glyph = ufo.newGlyph(_next_name(ufo, lambda i: f"{_name_prefix(color_glyph)}{i}"))
glyph.width = color_glyph.ufo_glyph.width
return glyph
def _create_transformed_glyph(
color_glyph: ColorGlyph, paint: PaintGlyph, transform: Affine2D
) -> Glyph:
glyph = _init_glyph(color_glyph)
glyph.components.append(Component(baseGlyph=paint.glyph, transformation=transform))
color_glyph.ufo.glyphOrder += [glyph.name]
return glyph
def _colr0_layers(color_glyph: ColorGlyph, root: Paint, palette: Sequence[Color]):
# COLRv0: write out each PaintGlyph we see in it's first color
# If we see a transformed glyph generate a component
# Results for complex structures will be suboptimal :)
ufo = color_glyph.ufo
layers = []
for context in root.breadth_first():
if context.paint.format != PaintGlyph.format: # pytype: disable=attribute-error
continue
paint_glyph: PaintGlyph = (
context.paint
) # pytype: disable=annotation-type-mismatch
color = next(paint_glyph.colors())
glyph_name = paint_glyph.glyph
if context.transform != Affine2D.identity():
glyph_name = _create_transformed_glyph(
color_glyph, paint_glyph, context.transform
).name
layers.append((glyph_name, color.palette_index(palette)))
return layers
def _quantize_bounding_rect(
xMin: float,
yMin: float,
xMax: float,
yMax: float,
factor: int = 1,
) -> Tuple[int, int, int, int]:
"""
>>> bounds = (72.3, -218.4, 1201.3, 919.1)
>>> _quantize_bounding_rect(*bounds)
(72, -219, 1202, 920)
>>> _quantize_bounding_rect(*bounds, factor=10)
(70, -220, 1210, 920)
>>> _quantize_bounding_rect(*bounds, factor=100)
(0, -300, 1300, 1000)
"""
assert factor >= 1
return (
int(math.floor(xMin / factor) * factor),
int(math.floor(yMin / factor) * factor),
int(math.ceil(xMax / factor) * factor),
int(math.ceil(yMax / factor) * factor),
)
def _transformed_glyph_bounds(
ufo: ufoLib2.Font, glyph_name: str, transform: Affine2D
) -> Optional[Tuple[float, float, float, float]]:
glyph = ufo[glyph_name]
pen = bounds_pen = ControlBoundsPen(ufo)
if not transform.almost_equals(Affine2D.identity()):
pen = TransformPen(bounds_pen, transform)
glyph.draw(pen)
return bounds_pen.bounds
def _bounds(
color_glyph: ColorGlyph, quantize_factor: int = 1
) -> Optional[Tuple[int, int, int, int]]:
bounds = None
for root in color_glyph.painted_layers:
for context in root.breadth_first():
if not isinstance(context.paint, PaintGlyph):
continue
paint_glyph: PaintGlyph = cast(PaintGlyph, context.paint)
glyph_bbox = _transformed_glyph_bounds(
color_glyph.ufo, paint_glyph.glyph, context.transform
)
if glyph_bbox is None:
continue
if bounds is None:
bounds = glyph_bbox
else:
bounds = unionRect(bounds, glyph_bbox)
if bounds is None:
return
# before quantizing to integer values > 1, we must first round floats to
# int using the same rounding function (i.e. otRound) that fontTools
# glyf table's compile method will use to round any float coordinates.
bounds = tuple(otRound(v) for v in bounds)
if quantize_factor > 1:
return _quantize_bounding_rect(*bounds, factor=quantize_factor)
return bounds
def _ufo_colr_layers(
colr_version: int, colors: Sequence[Color], color_glyph: ColorGlyph
):
# The value for a COLOR_LAYERS_KEY entry per
# https://github.com/googlefonts/ufo2ft/pull/359
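    # COLRv0: a list of (layer glyph name, palette index) tuples;
    # COLRv1: a single PaintColrLayers dict with "Format" and "Layers" keys (built below).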
colr_layers = []
# accumulate layers in z-order
for paint in color_glyph.painted_layers:
if colr_version == 0:
colr_layers.extend(_colr0_layers(color_glyph, paint, colors))
elif colr_version == 1:
colr_layers.append(paint.to_ufo_paint(colors))
else:
raise ValueError(f"Invalid color version {colr_version}")
if colr_version > 0:
colr_layers = {
"Format": int(ot.PaintFormat.PaintColrLayers),
"Layers": colr_layers,
}
return colr_layers
def _colr_ufo(
colr_version: int,
config: FontConfig,
ufo: ufoLib2.Font,
color_glyphs: Tuple[ColorGlyph, ...],
):
# We want to mutate our view of color glyphs
color_glyphs = list(color_glyphs)
# Sort colors so the index into colors == index into CPAL palette.
# We only store opaque colors in CPAL for COLRv1, as 'alpha' is
# encoded separately.
colors = sorted(
set(
c if colr_version == 0 else c.opaque()
for c in chain.from_iterable(g.colors() for g in color_glyphs)
if not c.is_current_color()
)
)
logging.debug("colors %s", colors)
if len(colors) == 0:
# Chrome 98 doesn't like when COLRv1 font has empty CPAL palette, so we
# add one unused color as workaround.
# TODO(anthrotype): File a bug and remove hack once the bug is fixed upstream
colors.append(Color(0, 0, 0, 1.0))
# KISS; use a single global palette
ufo.lib[ufo2ft.constants.COLOR_PALETTES_KEY] = [[c.to_ufo_color() for c in colors]]
# each base glyph maps to a list of (glyph name, paint info) in z-order
ufo_color_layers = {}
# potentially reusable glyphs
glyph_cache = GlyphReuseCache(config.reuse_tolerance)
clipBoxes = {}
quantization = config.clipbox_quantization
if quantization is None:
# by default, quantize clip boxes to an integer value 2% of the UPEM
quantization = round(config.upem * 0.02)
for i, color_glyph in enumerate(color_glyphs):
logging.debug(
"%s %s %s",
ufo.info.familyName,
color_glyph.ufo_glyph_name,
color_glyph.transform_for_font_space(),
)
# generate glyphs for PaintGlyph's and assign glyph names
color_glyphs[i] = color_glyph = _migrate_paths_to_ufo_glyphs(
color_glyph, glyph_cache
)
if color_glyph.painted_layers:
# write out the ufo structures for COLR
ufo_color_layers[color_glyph.ufo_glyph_name] = _ufo_colr_layers(
colr_version, colors, color_glyph
)
bounds = _bounds(color_glyph, quantization)
if bounds is not None:
clipBoxes.setdefault(bounds, []).append(color_glyph.ufo_glyph_name)
ufo.lib[ufo2ft.constants.COLOR_LAYERS_KEY] = ufo_color_layers
if clipBoxes:
if colr_version == 0:
# COLRv0 doesn't define its own bounding boxes, but some implementations
# rely on the extents of the base glyph so we must add those
for bounds, glyphs in clipBoxes.items():
for glyph_name in glyphs:
_draw_glyph_extents(ufo, ufo[glyph_name], bounds)
else:
# COLRv1 clip boxes are stored in UFO lib.plist as an array of 2-tuples,
# each containing firstly the glyph names (array of strings), and secondly
# the clip box values (array of 4 integers for a non-variable box) shared
# by all those glyphs.
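            # e.g. (illustrative shape only): [(["emoji_u1f600"], (0, -200, 1200, 1000)), ...]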
ufo.lib[ufo2ft.constants.COLR_CLIP_BOXES_KEY] = [
(glyphs, box) for box, glyphs in clipBoxes.items()
]
def _sbix_ttfont(
config: FontConfig,
_,
color_glyphs: Tuple[ColorGlyph, ...],
ttfont: ttLib.TTFont,
):
make_sbix_table(config, ttfont, color_glyphs)
def _cbdt_ttfont(
config: FontConfig,
_,
color_glyphs: Tuple[ColorGlyph, ...],
ttfont: ttLib.TTFont,
):
make_cbdt_table(config, ttfont, color_glyphs)
def _svg_ttfont(
config: FontConfig,
_,
color_glyphs: Tuple[ColorGlyph, ...],
ttfont: ttLib.TTFont,
picosvg: bool = True,
compressed: bool = False,
):
make_svg_table(config, ttfont, color_glyphs, picosvg, compressed)
def _picosvg_and_cbdt(
config: FontConfig,
_,
color_glyphs: Tuple[ColorGlyph, ...],
ttfont: ttLib.TTFont,
):
picosvg = True
compressed = False
# make the svg table first because it changes glyph order and cbdt cares
make_svg_table(config, ttfont, color_glyphs, picosvg, compressed)
make_cbdt_table(config, ttfont, color_glyphs)
def _ensure_codepoints_will_have_glyphs(ufo, glyph_inputs):
"""Ensure all codepoints we use will have a glyph.
Single codepoint sequences will directly mapped to their glyphs.
We need to add a glyph for any codepoint that is only used in a multi-codepoint sequence.
"""
all_codepoints = set()
direct_mapped_codepoints = set()
for glyph_input in glyph_inputs:
if not glyph_input.codepoints:
continue
if len(glyph_input.codepoints) == 1:
direct_mapped_codepoints.update(glyph_input.codepoints)
all_codepoints.update(glyph_input.codepoints)
need_blanks = all_codepoints - direct_mapped_codepoints
logging.debug("%d codepoints require blanks", len(need_blanks))
glyph_names = []
for codepoint in need_blanks:
# Any layer is fine; we aren't going to draw
glyph = ufo.newGlyph(glyph_name(codepoint))
glyph.unicode = codepoint
glyph_names.append(glyph.name)
ufo.glyphOrder = ufo.glyphOrder + sorted(glyph_names)
def _generate_color_font(config: FontConfig, inputs: Iterable[InputGlyph]):
"""Make a UFO and optionally a TTFont from svgs."""
ufo = _ufo(config)
_ensure_codepoints_will_have_glyphs(ufo, inputs)
color_glyphs = []
glyph_order = list(ufo.glyphOrder)
for glyph_input in inputs:
if glyph_input.glyph_name in glyph_order:
gid = glyph_order.index(glyph_input.glyph_name)
else:
gid = len(glyph_order)
glyph_order.append(glyph_input.glyph_name)
color_glyphs.append(
ColorGlyph.create(
config,
ufo,
str(glyph_input.svg_file) if glyph_input.svg_file else "",
gid,
glyph_input.glyph_name,
glyph_input.codepoints,
glyph_input.svg,
str(glyph_input.bitmap_file) if glyph_input.bitmap_file else "",
glyph_input.bitmap,
)
)
color_glyphs = tuple(color_glyphs)
# TODO: Optimize glyphOrder so that color glyphs sharing the same clip box
# values are placed next to one another in continuous ranges, to minimize number
# of COLRv1 ClipRecords
ufo.glyphOrder = glyph_order
for g in color_glyphs:
ufo_gid = ufo.glyphOrder.index(g.ufo_glyph_name)
assert (
g.glyph_id == ufo_gid
), f"{g.ufo_glyph_name} is {ufo_gid} in ufo, {g.glyph_id} in ColorGlyph"
_COLOR_FORMAT_GENERATORS[config.color_format].apply_ufo(config, ufo, color_glyphs)
if config.fea_file:
with open(config.fea_file) as f:
ufo.features.text = f.read()
logging.debug("fea:\n%s\n" % ufo.features.text)
else:
logging.debug("No fea")
ttfont = _make_ttfont(config, ufo, color_glyphs)
# Permit fixups where we can't express something adequately in UFO
_COLOR_FORMAT_GENERATORS[config.color_format].apply_ttfont(
config, ufo, color_glyphs, ttfont
)
# TODO may wish to nuke 'post' glyph names
return ufo, ttfont
def _inputs(
font_config: FontConfig,
glyph_mappings: Sequence[GlyphMapping],
) -> Generator[InputGlyph, None, None]:
for g in glyph_mappings:
picosvg = None
if font_config.has_svgs:
if not g.svg_file:
raise ValueError(f"No svg file for glyph {g.glyph_name}")
try:
picosvg = SVG.parse(g.svg_file)
except etree.ParseError as e:
raise IOError(f"Unable to parse {g.svg_file}") from e
bitmap = None
if font_config.has_bitmaps:
if not g.bitmap_file:
raise ValueError(f"No bitmap file for glyph {g.glyph_name}")
bitmap = PNG.read_from(g.bitmap_file)
yield InputGlyph(
g.svg_file,
g.bitmap_file,
g.codepoints,
g.glyph_name,
picosvg,
bitmap,
)
def main(argv):
config_file = None
if FLAGS.config_file:
config_file = Path(FLAGS.config_file)
font_config = config.load(config_file)
if len(font_config.masters) != 1:
raise ValueError("write_font expects only one master")
inputs = list(_inputs(font_config, glyphmap.parse_csv(FLAGS.glyphmap_file)))
if not inputs:
sys.exit("Please provide at least one svg filename")
ufo, ttfont = _generate_color_font(font_config, inputs)
_write(ufo, ttfont, font_config.output_file)
logging.info("Wrote %s" % font_config.output_file)
if __name__ == "__main__":
flags.mark_flag_as_required("glyphmap_file")
app.run(main)
|
googlefonts/nanoemoji
|
src/nanoemoji/write_font.py
|
Python
|
apache-2.0
| 29,050
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import six
from agate import Table
from agate.data_types import *
from agate.testcase import AgateTestCase
class TestPrintTable(AgateTestCase):
def setUp(self):
self.rows = (
('1.7', 2000, 'a'),
('11.18', None, None),
('0', 1, 'c')
)
self.number_type = Number()
self.international_number_type = Number(locale='de_DE')
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [
self.number_type,
self.international_number_type,
self.text_type
]
def test_print_table(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(output=output)
lines = output.getvalue().split('\n')
self.assertEqual(len(lines), 6)
self.assertEqual(len(lines[0]), 25)
def test_print_table_max_rows(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(max_rows=2, output=output)
lines = output.getvalue().split('\n')
self.assertEqual(len(lines), 6)
self.assertEqual(len(lines[0]), 25)
def test_print_table_max_columns(self):
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(max_columns=2, output=output)
lines = output.getvalue().split('\n')
self.assertEqual(len(lines), 6)
self.assertEqual(len(lines[0]), 23)
def test_print_table_max_precision(self):
rows = (
('1.745', 1.745, 1.72),
('11.123456', 11.123456, 5.10),
('0', 0, 0.10)
)
column_names = ['text_number', 'real_long_number', 'real_short_number']
column_types = [
self.text_type,
self.number_type,
self.number_type
]
table = Table(rows, column_names, column_types)
output = six.StringIO()
table.print_table(output=output, max_precision=2)
lines = output.getvalue().split('\n')
# Text shouldn't be affected
self.assertIn(u' 1.745 ', lines[2])
self.assertIn(u' 11.123456 ', lines[3])
self.assertIn(u' 0 ', lines[4])
# Test real precision above max
self.assertIn(u' 1.74… ', lines[2])
self.assertIn(u' 11.12… ', lines[3])
self.assertIn(u' 0.00… ', lines[4])
# Test real precision below max
self.assertIn(u' 1.72 ', lines[2])
self.assertIn(u' 5.10 ', lines[3])
self.assertIn(u' 0.10 ', lines[4])
def test_print_table_max_column_width(self):
rows = (
('1.7', 2, 'this is long'),
('11.18', None, None),
('0', 1, 'nope')
)
column_names = ['one', 'two', 'also, this is long']
table = Table(rows, column_names, self.column_types)
output = six.StringIO()
table.print_table(output=output, max_column_width=7)
lines = output.getvalue().split('\n')
self.assertIn(' also... ', lines[0])
self.assertIn(' this... ', lines[2])
self.assertIn(' nope ', lines[4])
def test_print_table_locale(self):
"""
Verify that the locale of the international number is correctly
controlling the format of how it is printed.
"""
table = Table(self.rows, self.column_names, self.column_types)
output = six.StringIO()
table.print_table(max_columns=2, output=output, locale='de_DE')
# If it's working, the english '2,000' should appear as '2.000'
self.assertTrue("2.000" in output.getvalue())
|
onyxfish/agate
|
tests/test_table/test_print_table.py
|
Python
|
mit
| 3,803
|
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 8082 # Reserve a port for your service.
s.connect((host, port))
s.send("send".encode())
check = s.recv(1024).decode()
print(check)
while True:
if check == 'ok':
f = open('tosend.png','rb')
print ('Sending...')
l = f.read(1024)
while (l):
s.send(l)
l = f.read(1024)
print ('Sending...')
f.close()
#s.send("endtrans".encode())
print ("Done Sending")
s.shutdown(socket.SHUT_WR)
        s.close()                      # Close the socket when done
break
|
ukholiday94/Project
|
1stStep/client.py
|
Python
|
gpl-3.0
| 634
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.core.targets.doc import Page
from pants.backend.core.tasks.filter import Filter
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseFilterTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'target': Dependencies,
'java_library': JavaLibrary,
'page': Page,
'python_library': PythonLibrary,
'python_requirement_library': PythonRequirementLibrary,
}
)
@classmethod
def task_type(cls):
return Filter
class FilterEmptyTargetsTest(BaseFilterTest):
def test_no_filters(self):
self.assert_console_output()
def test_type(self):
self.assert_console_output(options={'type': ['page']})
self.assert_console_output(options={'type': ['java_library']})
def test_regex(self):
self.assert_console_output(options={'regex': ['^common']})
self.assert_console_output(options={'regex': ['-^common']})
class FilterTest(BaseFilterTest):
def setUp(self):
super(FilterTest, self).setUp()
requirement_injected = set()
def add_to_build_file(path, name, *deps):
if path not in requirement_injected:
self.add_to_build_file(path, "python_requirement_library(name='foo')")
requirement_injected.add(path)
all_deps = ["'{0}'".format(dep) for dep in deps] + ["':foo'"]
self.add_to_build_file(path, dedent("""
python_library(name='{name}',
dependencies=[{all_deps}],
tags=['{tag}']
)
""".format(name=name, tag=name + "_tag", all_deps=','.join(all_deps))))
add_to_build_file('common/a', 'a')
add_to_build_file('common/b', 'b')
add_to_build_file('common/c', 'c')
add_to_build_file('overlaps', 'one', 'common/a', 'common/b')
add_to_build_file('overlaps', 'two', 'common/a', 'common/c')
add_to_build_file('overlaps', 'three', 'common/a', 'overlaps:one')
def test_roots(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'),
extra_targets=self.targets('overlaps/::')
)
def test_nodups(self):
targets = [self.target('common/b')] * 2
self.assertEqual(2, len(targets))
self.assert_console_output(
'common/b:b',
targets=targets
)
def test_no_filters(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::')
)
def test_filter_type(self):
self.assert_console_output(
'common/a:a',
'common/b:b',
'common/c:c',
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'type': ['python_library']}
)
self.assert_console_output(
'common/a:foo',
'common/b:foo',
'common/c:foo',
'overlaps:foo',
targets=self.targets('::'),
options={'type': ['-python_library']}
)
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
# Note that the comma is inside the string, so these are ORed.
options={'type': ['python_requirement_library,python_library']}
)
def test_filter_multiple_types(self):
# A target can only have one type, so the output should be empty.
self.assert_console_output(
targets=self.targets('::'),
options={'type': ['python_requirement_library', 'python_library']}
)
def test_filter_target(self):
self.assert_console_output(
'common/a:a',
'overlaps:foo',
targets=self.targets('::'),
options={'target': ['common/a,overlaps/:foo']}
)
self.assert_console_output(
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'target': ['-common/a:a,overlaps:one,overlaps:foo']}
)
def test_filter_ancestor(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'overlaps:one',
'overlaps:foo',
targets=self.targets('::'),
options={'ancestor': ['overlaps:one,overlaps:foo']}
)
self.assert_console_output(
'common/c:c',
'common/c:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'ancestor': ['-overlaps:one,overlaps:foo']}
)
def test_filter_ancestor_out_of_context(self):
"""Tests that targets outside of the context used as filters are parsed before use."""
# Add an additional un-injected target, and then use it as a filter.
self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
self.assert_console_output(
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
options={'ancestor': ['-blacklist']}
)
def test_filter_ancestor_not_passed_targets(self):
"""Tests filtering targets based on an ancestor not in that list of targets."""
# Add an additional un-injected target, and then use it as a filter.
self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
self.assert_console_output(
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'), # blacklist is not in the list of targets
options={'ancestor': ['-blacklist']}
)
self.assert_console_output(
'common/a:a', # a: _should_ show up if we don't filter.
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('common/::'),
options={'ancestor': []}
)
def test_filter_regex(self):
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
targets=self.targets('::'),
options={'regex': ['^common']}
)
self.assert_console_output(
'common/a:foo',
'common/b:foo',
'common/c:foo',
'overlaps:one',
'overlaps:two',
'overlaps:three',
'overlaps:foo',
targets=self.targets('::'),
options={'regex': ['+foo,^overlaps']}
)
self.assert_console_output(
'overlaps:one',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'regex': ['-^common,foo$']}
)
# Invalid regex.
self.assert_console_raises(TaskError,
targets=self.targets('::'),
options={'regex': ['abc)']}
)
def test_filter_tag_regex(self):
# Filter two.
self.assert_console_output(
'overlaps:three',
targets=self.targets('::'),
options={'tag_regex': ['+e(?=e)']}
)
# Removals.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:three',
targets=self.targets('::'),
options={'tag_regex': ['-one|two']}
)
# Invalid regex.
self.assert_console_raises(TaskError,
targets=self.targets('::'),
options={'tag_regex': ['abc)']}
)
def test_filter_tag(self):
# One match.
self.assert_console_output(
'common/a:a',
targets=self.targets('::'),
options={'tag': ['+a_tag']}
)
# Two matches.
self.assert_console_output(
'common/a:a',
'common/b:b',
targets=self.targets('::'),
options={'tag': ['+a_tag,b_tag']}
)
# One removal.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:two',
'overlaps:three',
targets=self.targets('::'),
options={'tag': ['-one_tag']}
)
# Two removals.
self.assert_console_output(
'common/a:a',
'common/a:foo',
'common/b:b',
'common/b:foo',
'common/c:c',
'common/c:foo',
'overlaps:foo',
'overlaps:three',
targets=self.targets('::'),
options={'tag': ['-one_tag,two_tag']}
)
# No match.
self.assert_console_output(
targets=self.targets('::'),
options={'tag': ['+abcdefg_tag']}
)
# No match due to AND of separate predicates.
self.assert_console_output(
targets=self.targets('::'),
options={'tag': ['a_tag', 'b_tag']}
)
|
sameerparekh/pants
|
tests/python/pants_test/backend/core/tasks/test_filter.py
|
Python
|
apache-2.0
| 9,825
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import load_backend, login
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.shortcuts import redirect
from importlib import import_module
from django.utils import six
from django.contrib.auth import get_user_model
def _load_module(path):
"""Code to load create user module. Copied off django-browserid."""
i = path.rfind(".")
module, attr = path[:i], path[i + 1 :]
try:
mod = import_module(module)
except ImportError:
raise ImproperlyConfigured("Error importing CAN_LOGIN_AS" " function.")
except ValueError:
raise ImproperlyConfigured(
"Error importing CAN_LOGIN_AS" " function. Is CAN_LOGIN_AS a" " string?"
)
try:
can_login_as = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured(
"Module {0} does not define a {1} " "function.".format(module, attr)
)
return can_login_as
def user_login(request, user_id):
user_id = int(user_id)
CAN_LOGIN_AS = getattr(settings, "CAN_LOGIN_AS", lambda r, y: r.user.is_superuser)
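    # CAN_LOGIN_AS may be a callable or a dotted-path string (e.g. "myapp.utils.can_login_as",
    # illustrative); either way it is called as can_login_as(request, target_user).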
if isinstance(CAN_LOGIN_AS, six.string_types):
can_login_as = _load_module(CAN_LOGIN_AS)
elif hasattr(CAN_LOGIN_AS, "__call__"):
can_login_as = CAN_LOGIN_AS
else:
raise ImproperlyConfigured(
"The CAN_LOGIN_AS setting is neither a valid module nor callable."
)
user = get_user_model().objects.get(pk=user_id)
login_as_allowed = False
returning_user = False
# default check
if can_login_as(request, user):
login_as_allowed = True
# returning user check
if request.session.get("loginas_original_user_id", None) == user_id:
login_as_allowed = True
returning_user = True
if not login_as_allowed:
raise PermissionDenied("You are not allowed to login as a different user!")
# Find a suitable backend.
if not hasattr(user, "backend"):
for backend in settings.AUTHENTICATION_BACKENDS:
if user == load_backend(backend).get_user(user.pk):
user.backend = backend
break
# Log the user in.
if hasattr(user, "backend"):
messages.info(request, 'You are logged in as "%s" now.' % user.username)
# store original user id
original_user_id = request.user.pk
login(request, user)
if returning_user:
request.session["loginas_original_user_id"] = None
else:
request.session["loginas_original_user_id"] = original_user_id
return redirect(request.GET.get("next", "/"))
|
digris/openbroadcast.org
|
website/tools/loginas/views.py
|
Python
|
gpl-3.0
| 2,714
|
import sys
from PyQt4 import QtGui
import Ui_MainWindow
import AddFloorDialog
import OpenFloorDialog
class MainWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
        self.ui = Ui_MainWindow.Ui_MainWindow()
        self.ui.setupUi(self)
self.ui.actionNew.triggered.connect(self.newTriggered)
self.ui.actionOpen.triggered.connect(self.openFile)
self.ui.actionAddFloor.triggered.connect(self.addFloor)
    def newTriggered(self):
        # Placeholder: the "New" action is not implemented yet.
        pass
def openFile(self):
filename = QtGui.QFileDialog.getOpenFileName()
self.addThing = OpenFloorDialog.OpenFloorDialog()
self.addThing.drawSceneFromFile(filename)
def addFloor(self):
self.addThing = AddFloorDialog.AddFloorDialog()
app = QtGui.QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec_()
|
shufeike/TerrianEditor
|
main.py
|
Python
|
gpl-2.0
| 873
|
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate, cstr
from frappe import _
from datetime import date, timedelta
from datetime import datetime
#from frappe.utils import now, add_minutes
def execute(filters=None):
columns, data = [], []
columns = get_columns()
data = get_result(filters)
return columns, data
def get_columns():
return [_("Appointment ID") + ":data:200",_("Status") + ":data:100", _("Customer") + ":Link/Customer:150",
_("Starts On") + ":Datetime:150", _("Ends On") + ":Datetime:150", _("Attended By") + ":Link/Employee:120",
_("Service") + ":data:250"]
def get_result(filters):
data = []
today = datetime.now()
today1 = today - timedelta(minutes = 150)
nexttime = today + timedelta(minutes = 20)
nexttime1 = nexttime - timedelta(minutes=150)
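    # query window: appointments starting between (now - 150 min) and (now + 20 min - 150 min)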
company = frappe.get_value("User", frappe.session.user, "company")
data = frappe.db.sql("""select name, status, customer, starts_on, ends_on, employee, total_services from
    `tabAppointment` where status = 'Open' and starts_on <= '%s' and starts_on >= '%s' order by starts_on asc """ % (nexttime1, today1), as_list=1)
return data
|
shitolepriya/Saloon_erp
|
erpnext/crm/report/todays_appointments/todays_appointments.py
|
Python
|
agpl-3.0
| 1,158
|
"""
Quotes
======
The ``quotes`` plugin can be used to capture quotes in a database. It will also
print a quote containg the name of the joining person on every join.
It provides the following commands:
- ``qadd <quote>``
Adds a new quote to the database
- ``qdelete <quote id>``
Admin only.
Deletes the quote with the specified ID.
- ``qget <quote id>``
Get the quote with the specified ID.
- ``qlast``
Get the last inserted quote.
- ``qrandom``
Retrieve a random quote.
- ``qdislike <quote id>``
Dislikes the quote.
- ``qlike <quote id>``
Likes the quote.
- ``qtop <limit>``
Shows the ``limit`` (default: ``max_quotes``) quotes with the best rating.
- ``qflop <limit>``
Shows the ``limit`` (default: ``max_quotes``) quotes with the worst rating.
- ``quotestats``
Display some stats about the quote database.
This is currently limited to the total number of quotes and the percentage
of quotes per author.
- ``searchquote <text>``
Search for a quote containing ``text``.
Options
-------
- ``database_path``
The path to the SQLite database file. Defaults to ``~/.lala/quotes.sqlite3``.
- ``max_quotes``
The maximum number of quotes to print when using ``searchquote`` or
``qtop``/``qflop``. Defaults to 5.
"""
from __future__ import division
import logging
import os
from collections import defaultdict
from functools import partial
from lala.util import command, msg, on_join
from lala.config import get, get_int
from twisted.enterprise import adbapi
from twisted.internet.defer import inlineCallbacks
__all__ = ()
DEFAULT_OPTIONS = {"DATABASE_PATH": os.path.join(os.path.expanduser("~/.lala"),
"quotes.sqlite3"),
"MAX_QUOTES": "5"}
MESSAGE_TEMPLATE = "[%s] %s"
MESSAGE_TEMPLATE_WITH_RATING = "[%s] %s (rating: %s, votes: %s)"
def _openfun(c):
c.execute("PRAGMA foreign_keys = ON;")
database_path = None
db_connection = None
def run_query(query, values=[], callback=None):
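    # Run a parameterised query on the adbapi pool; attach an optional callback to the returned Deferred.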
res = db_connection.runQuery(query, values)
if callback is not None:
res.addCallback(callback)
return res
def run_interaction(func, callback=None, **kwargs):
res = db_connection.runInteraction(func, kwargs)
if callback is not None:
res.addCallback(callback)
return res
def init():
global database_path
global db_connection
database_path = get("database_path")
db_connection = adbapi.ConnectionPool("sqlite3", database_path,
check_same_thread=False,
cp_openfun=_openfun,
cp_min=1)
def f(txn, *args):
txn.execute("""CREATE TABLE IF NOT EXISTS author(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE);""")
txn.execute("""CREATE TABLE IF NOT EXISTS quote(
id INTEGER PRIMARY KEY AUTOINCREMENT,
quote TEXT,
author INTEGER NOT NULL REFERENCES author(id));""")
txn.execute("""CREATE TABLE IF NOT EXISTS voter (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE);""")
txn.execute("""CREATE TABLE IF NOT EXISTS vote (
id INTEGER PRIMARY KEY AUTOINCREMENT,
vote INT NOT NULL,
quote INTEGER NOT NULL REFERENCES quote(id),
voter INTEGER NOT NULL REFERENCES voter(id),
CONSTRAINT valid_vote CHECK (vote IN (-1, 1)),
CONSTRAINT unique_quote_voter UNIQUE (quote, voter));""")
return run_interaction(f)
@command(aliases=["qget"])
def getquote(user, channel, text):
"""Show the quote with a specified number"""
def callback(quotes):
if len(quotes) > 0 and quotes[0][0] is not None:
msg(channel, MESSAGE_TEMPLATE_WITH_RATING % quotes[0])
else:
msg(channel, "%s: There's no quote #%s" % (user,
text))
if text:
logging.info("Trying to get quote number %s" % text)
run_query("""SELECT q.id, q.quote, sum(v.vote) as rating, count(v.vote)
as votes
FROM quote q
LEFT JOIN vote v
ON v.quote = q.id
WHERE q.id = ?;""",
[text],
callback)
@command(aliases=["qadd"])
def addquote(user, channel, text):
"""Add a quote"""
if text:
def add(txn, *args):
logging.info("Adding author %s" % user)
txn.execute("INSERT OR IGNORE INTO author (name) values (?)",
[user])
logging.info("Adding quote: %s" % text)
txn.execute("INSERT INTO quote (quote, author)\
SELECT (?), rowid\
FROM author WHERE name = (?);",
[text, user])
txn.execute("SELECT max(rowid) FROM quote;", [])
num = txn.fetchone()
msg(channel, "New quote: %s" % num)
return run_interaction(add)
else:
msg(channel, "%s: You didn't give me any text to quote " % user)
@command(admin_only=True, aliases=["qdelete"])
def delquote(user, channel, text):
"""Delete a quote with a specified number"""
if text:
logging.debug("delquote: %s" % text)
def interaction(txn, *args):
logging.debug("Deleting quote %s" % text)
txn.execute("DELETE FROM quote WHERE rowid = (?)", [text])
txn.execute("SELECT changes()")
res = txn.fetchone()
logging.debug("%s changes" % res)
return int(res[0])
def callback(changes):
if changes > 0:
msg(channel, "Quote #%s has been deleted." % text)
return
else:
msg(channel, "It doesn't look like quote #%s exists." %
text)
return run_interaction(interaction, callback)
@command(aliases=["qlast"])
def lastquote(user, channel, text):
"""Show the last quote"""
callback = partial(_single_quote_callback, channel)
run_query("SELECT rowid, quote FROM quote ORDER BY rowid DESC\
LIMIT 1;", [], callback)
@command(aliases=["qrandom"])
def randomquote(user, channel, text):
"""Show a random quote"""
callback = partial(_single_quote_callback, channel)
run_query("SELECT rowid, quote FROM quote ORDER BY random() DESC\
LIMIT 1;", [], callback)
@command(aliases=["qsearch"])
def searchquote(user, channel, text):
"""Search for a quote"""
def callback(quotes):
max_quotes = get_int("max_quotes")
if len(quotes) > max_quotes:
msg(channel, "Too many results, please refine your search")
elif len(quotes) == 0:
msg(channel, "No matching quotes found")
else:
for quote in quotes:
_send_quote_to_channel(channel, quote)
run_query(
"SELECT rowid, quote FROM quote WHERE quote LIKE (?)",
["".join(("%", text, "%"))],
callback
)
@command(aliases=["qstats"])
@inlineCallbacks
def quotestats(user, channel, text):
"""Display statistics about all quotes."""
result = yield run_query("SELECT count(quote) from quote;")
quote_count = result[0][0]
msg(channel, "There are a total of %i quotes." % quote_count)
rows = yield run_query(
"""
SELECT count(q.quote) AS c, a.name
FROM quote q
JOIN author a
ON q.author = a.rowid
GROUP BY a.rowid;
"""
)
count_author_dict = defaultdict(list)
for count, author in rows:
count_author_dict[count].append(author)
for count, authors in sorted(count_author_dict.items(), reverse=True):
percentage = (count * 100) / quote_count
if len(authors) > 1:
msg(channel, "%s each added %i quote(s) (%.2f%%)" %
(", ".join(authors), count, percentage))
else:
msg(channel, "%s added %i quote(s) (%.2f%%)" %
(authors[0], count, percentage))
def _like_impl(user, channel, text, votevalue):
if not len(text):
msg(channel,
"%s: You need to specify the number of the quote you like!" % user)
return
quotenumber = int(text)
def interaction(txn, *args):
logging.debug("Adding 1 vote for %i by %s" % (quotenumber, user))
txn.execute("""INSERT OR IGNORE INTO voter (name) VALUES (?);""",
[user])
txn.execute("""INSERT OR REPLACE INTO vote (vote, quote, voter)
SELECT ?, ?, voter.rowid
FROM voter
WHERE voter.name = ?;""",
[votevalue, quotenumber, user])
logging.debug("Added 1 vote for %i by %s" % (quotenumber, user))
msg(channel, "%s: Your vote for quote #%i has been accepted!"
% (user, quotenumber))
return run_interaction(interaction)
@command
def qlike(user, channel, text):
"""`Likes` a quote.
"""
return _like_impl(user, channel, text, 1)
@command
def qdislike(user, channel, text):
"""`Dislikes` a quote.
"""
return _like_impl(user, channel, text, -1)
@inlineCallbacks
def _topflopimpl(channel, text, top=True):
"""Shows quotes with the best or worst rating.
If ``top`` is True, the quotes with the best ratings will be shown,
otherwise the ones with the worst.
"""
if text:
limit = int(text)
else:
limit = get("max_quotes")
results = yield run_query(
"""
SELECT quote.id, quote.quote, sum(vote) as rating, count(vote) as votes
FROM vote
JOIN quote
ON vote.quote = quote.id
GROUP BY vote.quote
ORDER BY rating %s
LIMIT (?);""" % ("DESC" if top else "ASC"),
[limit])
for row in results:
msg(channel, MESSAGE_TEMPLATE_WITH_RATING % row)
@command
def qtop(user, channel, text):
"""Shows the quotes with the best rating.
"""
return _topflopimpl(channel, text, True)
@command
def qflop(user, channel, text):
"""Shows the quotes with the worst rating.
"""
return _topflopimpl(channel, text, False)
@on_join
def join(user, channel):
def callback(quotes):
try:
_send_quote_to_channel(channel, quotes[0])
except IndexError:
return
run_query("SELECT rowid, quote FROM quote where quote LIKE (?)\
ORDER BY random() LIMIT 1;", ["".join(["%", user, "%"])], callback)
def _single_quote_callback(channel, quotes):
try:
_send_quote_to_channel(channel, quotes[0])
except IndexError:
return
def _send_quote_to_channel(channel, quote):
(id, quote) = quote
msg(channel, MESSAGE_TEMPLATE % (id, quote))
|
mineo/lala
|
lala/plugins/quotes.py
|
Python
|
mit
| 10,919
|