repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
mehmoodz/Influenzr | publishers/resources/simple_ws.py | Python | mit | 818 | 0.01956 | from tornado import websocket, web, ioloop, autoreload
import json
import sys
new_msg=''
old_msg=''
def send_response():
print 'msg'
if new_msg<>old_msg:
print new_msg
class SocketHandler(websocket | .WebSocketHandler):
''' websocket handler '''
def open(self):
''' ran once an open ws connection is made'''
self.send('Opened')
socket=self
def send(self,message):
self.write_message(message)
def on_close(s | elf):
''' on close event, triggered once a connection is closed'''
self.send('Closed')
app = web.Application([
(r'/ws', SocketHandler),
])
if __name__ == '__main__':
app.listen(8888)
autoreload.add_reload_hook(send_response)
autoreload.start()
new_msg='boo'
ioloop.IOLoop.instance().start()
|
python-provy/provy | provy/more/debian/security/iptables.py | Python | mit | 10,980 | 0.004098 | from provy.core import Role
from provy.more.debian.package.aptitude import AptitudeRole
'''
Roles in this namespace are meant to provide `iptables <http://www.netfilter.org/>`_ management utilities for Debian distributions.
'''
class IPTablesRole(Role):
'''
This role provides `iptables <http://www.netfilter.org/>`_ utilities for Debian distributions.
.. note::
There are two important behaviors to notice:
1. Right after iptables is installed, this role allows TCP incoming connections to port 22, so that provy can still continue to provision the server through SSH.
2. Right before exiting the `with using(IPTablesRole)` block, it blocks all other ports and protocols, so that the server is secure by default.
So, when using this role, remember to allow all the ports with protocols that you need, otherwise you might not be able to connect to the services you provision later on.
:param block_on_finish: If :data:`False`, doesn't block other ports and protocols when finishing the usage of this role. Defaults to :data:`True`.
:type block_on_finish: :class:`bool`
Example:
::
from provy.core import Role
from provy.more.debian import IPTablesRole
class MySampleRole(Role):
def provision(self):
# this example allows only incoming HTTP connections
with self.using(IPTablesRole) as iptables:
iptables.allow('http')
# this example allows any incoming connections, but block SSH outgoing connections
with self.using(IPTablesRole) as iptables:
iptables.block_on_finish = False
iptables.reject(22, direction="out") # here we used a number, but could be "ssh" as well
# this example allows established sessions in interface eth0
with self.using(IPTablesRole) as iptables:
iptables.allow(interface='eth0', match='state', state='ESTABLISHED,RELATED')
'''
DIRECTION_TO_CHAIN_MAP = {
"in": "INPUT",
"out": "OUTPUT",
"forward": "FORWARD",
}
def __init__(self, prov, context):
super(IPTablesRole, self).__init__(prov, context)
self.block_on_finish = True
def provision(self):
'''
Installs iptables and its dependencies, if they're not already installed (though this is usually the case).
Also, it adds an `ACCEPT` rule for SSH (TCP/22), so that provy can continue to provision the server, and the user doesn't get locked out of it.
Example:
::
from provy.core import Role
from provy.more.debian import IPTablesRole
class MySampleRole(Role):
def provision(self):
self.provision_role(IPTablesRole) # no need to call this if using with block.
'''
with self.using(AptitudeRole) as aptitude:
aptitude.ensure_package_installed('iptables')
self.allow('ssh')
def list_rules(self):
'''
Lists the currently configured rules and returns them as a multiline string. Equivalent to running:
.. code-block:: sh
$ sudo iptables -L
Example:
::
from provy.core import Role
from provy.more.debian import IPTablesRole
class MySampleRole(Role):
def provision(self):
with self.using(IPTablesRole) as iptables:
iptables.list_rules()
'''
return self.execute('iptables -L', stdout=True, sudo=True)
def list_rules_with_commands(self):
'''
Like :meth:`list_rules`, but showing the rules as executable commands. Equivalent to running:
.. code-block:: sh
$ sudo iptables-save
Example:
::
from provy.core import Role
from provy.more.debian import IPTablesRole
class MySampleRole(Role):
def provision(self):
with self.using(IPTablesRole) as iptables:
iptables.list_rules_with_commands()
'''
return self.execute('iptables-save', stdout=True, sudo=True)
def schedule_cleanup(self):
'''
Apart from the core cleanup, this one also blocks other ports and protocols not allowed earlier ("catch-all" as the last rule)
and saves the iptables rules to the iptables config file, so that it's not lost upon restart.
Example:
::
from provy.core import Role
from provy.more.debian import IPTablesRole
class MySampleRole(Role):
def provision(self):
with self.using(IPTablesRole) as iptables:
self.schedule_cleanup() # no need to call this explicitly
'''
super(IPTablesRole, self).schedule_cleanup()
if self.block_on_finish:
self.reject()
self.execute("iptables-save > /etc/iptables.rules", stdout=False, sudo=True)
def __change_rule(self, policy, port, direction, protocol, interface, match=None, **options):
chain = self.DIRECTION_TO_CHAIN_MAP[direction]
command = "iptables -A %s -j %s -p %s" % (chain, policy, protocol)
if interface is not None:
command += " -i %s" % interface
if port is not None:
command += " --dport %s" % port
if match is not None:
command += " -m %s" % match
for option_name in options:
command += " --%s %s" % (option_name, options[option_name])
self.execute(command, stdout=False, sudo=True)
def allow(self, port=None, direction="in", protocol="tcp", interface=None, match=None, **options):
'''
Allows connections to be made to or from the server.
:param port: Port to be used. Defaults to None, which means all ports will be allowed. Defaults to :data:`None`.
:type port: :class:`int` or :class:`str`
:param direction: Direction of the connection related to the server. Can be either "in" (connections coming into the server), "out" (connections coming from the server to the outside) or "forward" (packet routing). Defaults to "in".
:type direction: :class:`str`
:param protocol: Protocol to be used - choose one that is understandable by iptables (like "udp", "icmp" etc). Defaults to "tcp".
:type protocol: :class:`str`
:param interface: The network interface to which the rule is bound to. Defaults to :data:`None` (bound to all).
:type interface: :class:`str`
:param match: Match filter. Defaults to :data:`None`.
:type match: :class:`str`
:param options: Arbitrary of arbitrary options that will be used in conjunction to the match filters.
:type options: Keyword arguments of :class:`str`
Example:
::
from provy.core import Role
f | rom provy.more.debian import IPTablesRole
class MySampleRole( | Role):
def provision(self):
with self.using(IPTablesRole) as iptables:
iptables.allow(port=11211, direction="out", protocol="udp") # allow UDP connections to an external Memcached server.
'''
self.__change_rule("ACCEPT", port, direction, protocol, interface, match, **options)
def reject(self, port=None, direction="in", protocol="all", interface=None, match=None, **options):
'''
Rejects connections to be made to or from the server, responding with a "connection refused" packet.
:param port: Port to be used. Defaults to None, which means all ports will be allowed.
:type port: :class:`int` or :class:`str`
:param direction: Direction of the connection related to the server. Can be either "in" (connections coming into the server), "out" (connections coming from the server to the outside) or "forward" (packet routing). Defaults to "in".
:type direction: :class:`str`
:param protocol: Protocol to be used - choose one that is understandable |
rmcauley/rainwave | api_requests/admin/enable_perks.py | Python | gpl-2.0 | 1,012 | 0.003953 | from libs import db
import | api.web
from api.urls import handle_api_url
from api import fieldtypes
from api.exceptions import APIException
fro | m libs import config
PRIVILEGED_GROUP_IDS = (18, 5, 4)
@handle_api_url("enable_perks_by_discord_ids")
class UserSearchByDiscordUserIdRequest(api.web.APIHandler):
auth_required = False
sid_required = False
description = "Accessible only to localhost connections, for wormgas."
help_hidden = True
fields = {"discord_user_ids": (fieldtypes.string_list, True)}
def post(self):
if self.request.remote_ip not in config.get("api_trusted_ip_addresses"):
raise APIException("auth_failed", f"{self.request.remote_ip} is not allowed to access this endpoint.")
list_as_tuple = tuple(self.get_argument("discord_user_ids"))
db.c.update("UPDATE phpbb_users SET group_id = 8 WHERE discord_user_id IN %s AND group_id NOT IN %s AND group_id != 8", (list_as_tuple, PRIVILEGED_GROUP_IDS))
self.append_standard("yes")
|
mindriot101/bokeh | examples/reference/models/Diamond.py | Python | bsd-3-clause | 815 | 0.001227 | import numpy as np
from bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid
from bokeh.models.markers import Diamond
from bokeh.io import curdoc, show
N = 9
x = np. | linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))
plot = Plot(
title=None, plot_width=300, plot_height=300,
h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)
glyph = Diamond(x="x", y="y", size="sizes", line_color="#1c9099", line_width=2, fill_color=None)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.ad | d_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
|
pureqml/qmlcore | test/lit.py | Python | mit | 381 | 0.002625 | #!/usr/bin/env python
# https://medium.com/@mshockwave/using-llvm-lit-out | -of-tree-5cddada85a78
# To run lit-based test suite:
# cd xyz/qmlcore/test && ./lit.py -va .
from lit.main import main
import os
if __name__ == '__main__':
if not os.path.exists(".cache/core.I | tem"):
print("Note that first run may take quite a while .cache/core.* is populated...")
main()
|
vfaronov/turq | tests/conftest.py | Python | isc | 3,212 | 0 | import socket
import subprocess
import sys
import time
import h11
import pytest
import requests
@pytest.fixture
def turq_instance():
return TurqInstance()
class TurqInstance:
"""Spins up and controls a live instance of Turq for testing."""
def __init__(self):
self.host = 'localhost'
# Test instance listens on port 13095 instead of the default 13085,
# to make it easier to run tests while also testing Turq manually.
# Of course, ideally it should be a random free port instead.
self.mock_port = 13095
self.editor_port = 13096
self.password = ''
| self.extra_args = []
self.wait = True
self._process = None
self.console_output = None
def __enter__(self):
args = [sys.executable, '-m', 'turq.main',
'--bind', self.host, '--mock-port', str(self.mock_port),
'--editor-port', str(self.editor_port)]
if self.password is not None:
args += ['--editor-password', self.password]
| args += self.extra_args
self._process = subprocess.Popen(args, stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE)
if self.wait:
self._wait_for_server()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._process.terminate()
self._process.wait()
self.console_output = self._process.stderr.read().decode()
return False
def _wait_for_server(self, timeout=3):
# Wait until the mock server starts accepting connections,
# but no more than `timeout` seconds.
t0 = time.monotonic()
while time.monotonic() - t0 < timeout:
time.sleep(0.1)
try:
self.connect().close()
self.connect_editor().close()
return
except OSError:
pass
raise RuntimeError('Turq failed to start')
def connect(self):
return socket.create_connection((self.host, self.mock_port), timeout=5)
def connect_editor(self):
return socket.create_connection((self.host, self.editor_port),
timeout=5)
def send(self, *events):
hconn = h11.Connection(our_role=h11.CLIENT)
with self.connect() as sock:
for event in events:
sock.sendall(hconn.send(event))
sock.shutdown(socket.SHUT_WR)
while hconn.their_state is not h11.CLOSED:
event = hconn.next_event()
if event is h11.NEED_DATA:
hconn.receive_data(sock.recv(4096))
elif not isinstance(event, h11.ConnectionClosed):
yield event
def request(self, method, url, **kwargs):
full_url = 'http://%s:%d%s' % (self.host, self.mock_port, url)
return requests.request(method, full_url, **kwargs)
def request_editor(self, method, url, **kwargs):
full_url = 'http://%s:%d%s' % (self.host, self.editor_port, url)
return requests.request(method, full_url, **kwargs)
|
kb2ma/adagios | adagios/pnp/views.py | Python | agpl-3.0 | 1,427 | 0 | # -*- coding: utf-8 -*-
#
# Adagios is a web based Nagios configuration interface
#
# Copyright (C) 2010, P | all Sigurdsson <palli@opensource.is>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero | General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.context_processors import csrf
from django.shortcuts import render_to_response
from django.shortcuts import HttpResponse
from adagios.pnp.functions import run_pnp
from adagios.views import adagios_decorator
import json
@adagios_decorator
def pnp(request, pnp_command='image'):
c = {}
c['messages'] = []
c['errors'] = []
result = run_pnp(pnp_command, **request.GET)
content_type = "text"
if pnp_command == 'image':
content_type = "image/png"
elif pnp_command == 'json':
content_type = "application/json"
return HttpResponse(result, content_type=content_type)
|
saltmine/redis-gadgets | tests/test_set_theory.py | Python | mit | 17,869 | 0.000112 | """
Tests for set_theory cached multi-set query library
"""
import time
import redis
from nose.tools import (raises, eq_, with_setup, assert_in, assert_not_in,
assert_not_equal, make_decorator)
# TODO: prefix test keys for keyspace find and delete instead of flushing
# TODO: Add missing test doc strings
# TODO: Change "should style" doc strings to affirmative statements
# TODO: Convert to class-based tests
DB_NUM = 15
from redis_gadgets import set_theory
from redis_gadgets import WeightedKey
def run_with_both(func):
"""Decorator to turn a test into a generator that runs the test with both
a Redis and a StrictRedis connection. Decorated tests should take an
argument for the database connection
"""
strict = redis.StrictRedis(db=DB_NUM)
non_strict = redis.Redis(db=DB_NUM)
def gen_new_tests():
yield func, strict
yield func, non_strict
return make_decorator(func)(gen_new_tests)
def _setup():
# force these to strict mode - doesn't matter here, because presumably the
# caller will use the correct call syntax for whichever client they like.
db = redis.StrictRedis(db=DB_NUM)
db.flushdb()
now = time.time()
yesterday = now - 86400
for i in range(10):
db.zadd('SET_A', now + i, i)
for i in range(5, DB_NUM): # slight overlap in ids
db.zadd('SET_B', yesterday + i, i)
def _compound_setup():
# TEST_1: { 0:50, 1:51, ... 9:59}
# TEST_2: { 0:50, 1:51, ... 19:69}
# TEST_3: { 10:70, 11:71, ... 29:79}
# force these to strict mode - doesn't matter here, because presumably the
# caller will use the correct call syntax for whichever client they like.
db = redis.StrictRedis(db=DB_NUM)
db.flushdb()
val = 50
for i in range(30):
if i < 10:
db.zadd('TEST_1', val + i, i)
db.zadd('TEST_2', val + i, i)
elif i < 20:
db.zadd('TEST_2', val + i, i)
db.zadd('TEST_3', val + i, i)
elif i < 30:
db.zadd('TEST_3', val + i, i)
def test_thread_key_names():
import threading
results = []
class KeyRecordingThread(threading.Thread):
def run(self):
results.append(set_theory._unique_id())
threads = []
for i in range(2):
new_thread = KeyRecordingThread()
new_thread.start()
threads.append(new_thread)
while threads:
thread = threads.pop()
thread.join()
if thread.is_alive():
threads.append(thread)
assert_not_equal(results[0], results[1])
@run_with_both
@raises(ValueError)
def test_no_default_start_and_end(db):
'''Start and end are required for non-count queries
'''
st = set_theory.SetTheory(db)
st.zset_fetch([("fake_key:1",)])
@run_with_both
@raises(ValueError)
def test_no_range_for_count(db):
'''Start and end are invalid for count queries
'''
st = set_theory.SetTheory(db)
st.zset_fetch([("fake_key:1",)], count=True, start=1, end=1)
@run_with_both
@raises(ValueError)
def test_return_key_raises_without_ttl(db):
'''You should not be able to return the hash key without a ttl
because it will be deleted before you can use the return value
'''
st = set_theory.SetTheory(db)
st.zset_fetch([('fake_key',)], return_key=True)
@run_with_both
@raises(ValueError)
def test_invalid_weighted_key(db):
"""SetTheory raises ValueError on invalid bind element
"""
st = set_theory.SetTheory(db)
st.zset_fetch([('SET_A', 1.0, 'bogus'), ('SET_B',)],
operator="intersect", count=True)
@with_setup(_setup)
@run_with_both
def test_single_weighted_key(db):
"""Can use WeightedKey directly for single key queries
"""
st = set_theory.SetTheory(db)
key = WeightedKey('SET_A')
eq_(10, st.zset_fetch([key], count=True))
@with_setup(_setup)
@run_with_both
def test_multiple_weighted_key(db):
"""Can use WeightedKey directly for multiple key queries
"""
st = set_theory.SetTheory(db)
key_a = WeightedKey('SET_A')
key_b = WeightedKey('SET_B')
eq_(5, st.zset_fetch([key_a, key_b],
operator="intersect", count=True,
thread_local=True))
@with_setup(_setup)
@run_with_both
def test_simple_thread_safe_count(db):
st = set_theory.SetTheory(db)
a_count = st.zset_fetch([('SET_A',)], count=True,
thread_local=True)
eq_(10, a_count, "GUARD: initial count for SET_A is %s" % a_count)
b_count = st.zset_fetch([('SET_B',)], count=True,
thread_local=True)
eq_(10, b_count, "GUARD: initial count for SET_B is %s" % b_count)
eq_(5, st.zset_fetch([('SET_A',), ('SET_B',)],
operator="intersect", count=True,
thread_local=True))
eq_(15, st.zset_fetch([('SET_A',), ('SET_B',)],
operator="union", count=True,
thread_local=True))
@with_setup(_setup)
@run_with_both
def test_simple_count(db):
st = set_theory.SetTheory(db)
eq_(10, st.zset_fetch([('SET_A',)], count=True))
eq_(10, st.zset_fetch([('SET_B',)], count=True))
eq_(5, st.zset_fetch([('SET_A',), ('SET_B',)],
operator="intersect", count=True), "intersect count was wrong")
eq_(15, st.zset_fetch([('SET_A',), ('SET_B',)],
operator="union", count=True), "union count was wrong")
@with_setup(_setup)
@run_with_both
def test_simple_fetch(db):
st = set_theory.SetTheory(db)
results = st.zset_fetch([('SET_A',)], start=0, end=0,
reverse=False)
print results
assert '0' in results
assert '1' not in results
@with_setup(_setup)
@run_with_both
def test_weighted_intersect(db):
# TODO: Shouldn't this test actually look at the scores?
st = set_theory.SetTheory(db)
results = st.zset_fetch([('SET_A', 1.0), ('SET_B', 2.0)],
operator="intersect", start=0,
end=0)
print results
assert '9' in results
@with_setup(_setup)
@run_with_both
def test_weighted_union(db):
# TODO: Shouldn't this test actually look at the scores?
st = set_theory.SetTheory(db)
results = st.zset_fetch([('SET_A', 1.0), ('SET_B', 2.0)],
operator="union", start=0, end=0)
print results
assert '14' in results
@with_setup(_compound_setup)
@run_with_both
def test_intersect_union(db):
st = set_theory.SetTheory(db)
# temp hash should be (TEST_2 && TEST_3) (10-19 inclusive)
temp_hash = st.zset_fetch([('TEST_2',), ('TEST_3',)],
return_key=True, ttl=5,
operator="intersect")
# now union TEST_1 onto the earlier set and we should have the entire set
# again (0-19 inclusive)
results = st.zset_fetch([(temp_hash,), ('TEST_1',)],
start=0, end=-1,
operator="union")
for i in range(30):
if i < 20:
assert_in(str(i), results)
else:
assert_not_in(str(i), results)
@with_setup(_compound_setup)
@run_with_both
def test_union_intersect(db):
# first make an unweighted union as a control
st = set_theory.Se | tTheory(db)
temp_hash_control = st.zset_fetch([('TEST_1',), ('TEST_3',)],
return_key=True, ttl=5,
operator="union")
control_results = st.zset_fetch([(temp_hash_control,),
('TEST_2',)], start=0,
end=-1,
operator="intersect" | )
eq_('19', control_results[0],
"GUARD: 19 not first element of %s" % control_results)
# now for the actual weighting experiment
# now make a weighted union to test TEST_1 trumps TEST_3
temp_hash_weighted = st.zset_fetch([('TEST_1', 1000), ('TEST_3',)],
return_key=True, ttl=5,
operator="union")
experiment_results = st.zset_fetch([(temp_hash_weighted,),
('TEST_2',)], start=0,
|
google-code-export/dojango | dojango/decorators.py | Python | bsd-3-clause | 5,423 | 0.007376 | from django.http import HttpResponseNotAllowed, HttpResponseServerError
from django.utils import simplejson as json
from util import to_json_response
from util import to_dojo_data
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
def expect_post_request(func):
"""Allow only POST requests to come in, throw an exception otherwise.
This relieves from checking every time that the request is
really a POST request, which it should be when using this
decorator.
"""
def _ret(*args, **kwargs):
ret = func(*args, **kwargs)
request = args[0]
if not request.method=='POST':
return HttpResponseNotAllowed(['POST'])
return ret
return _ret
def add_request_getdict(func):
"""Add the method getdict() to the request object.
This works just like getlist() only that it decodes any nested
JSON encoded object structure.
Since sending deep nested structures is not possible via
GET/POST by default, this enables it. Of course you need to
make sure that on the JavaScript side you are also sending
the data properly, which dojango.send() automatically does.
Example:
this is being sent:
one:1
two:{"three":3, "four":4}
using
request.POST.getdict('two')
returns a dict containing the values sent by the JavaScript.
"""
def _ret(*args, **kwargs):
args[0].POST.__class__.getdict = __getdict
ret = func(*args, **kwargs)
return ret
return _ret
def __getdict(self, key):
ret = self.get(key)
try:
ret = json.loads(ret)
except ValueError: # The value was not JSON encoded :-)
raise Exception('"%s" was not JSON encoded as expected (%s).' % (key, str(ret)))
return ret
def json_response(func):
"""
A simple json response decorator. Use it on views, where a python data object should be converted
to a json response:
@json_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret)
return wraps(func)(inner)
def jsonp_response_custom(callback_param_name):
"""
A jsonp (JSON with Padding) response decorator, where you can define your own callbackParamName.
It acts like the json_response decorator but with the difference, that it
wraps the returned json string into a client-specified function name (that is the Padding).
You can add this decorator to a function like that:
@jsonp_response_custom("my_callback_param")
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
Your now can access this view from a foreign URL using JSONP.
An example with Dojo looks like that:
dojo.io.script.get({ url:"http://example.com/my_url/",
callbackParamName:"my_callback_param",
load: function(response){
console.log(response);
}
});
Note: the callback_param_name in the decorator and in your JavaScript JSONP call must be the same.
"""
def decorator(func):
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, callback_param_name=callback_param_name)
return wraps(func)(inner)
return decorator
jsonp_response = jsonp_response_custom("jsonp_callback")
jsonp_response.__doc__ = "A predefined jsonp response decorator using 'jsoncallback' as a fixed callback_param_name."
def json_iframe_response(func):
"""
A simple json response decorator but wrapping the json response into a html page.
It helps when doing a json request using an iframe (e.g. file up-/download):
@json_iframe
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, use_iframe=True)
return wraps(func)(inner)
def __prepare_json_ret(request, ret, callback_param_name=None, use_iframe=False):
if ret==False:
ret = {'success':False}
elif ret==None: # Sometimes there is no return.
ret = {}
# Add the 'ret'=True, since it was obviously no set yet and we got valid data, no exception.
func_name = None
if callback_param_name:
func_name = request.GET.get(callback_param_name, "callbackParamName")
try:
if not ret.has_key('success'):
ret['success'] = True
except AttributeError, e:
raise Exception("The returned data of your function must be a dictionary!")
json_ret = ""
try:
# Sometimes the serialization fails, i.e. when there are too deep | ly nested objects or even classes inside
json_ret = to_json_response(ret, func_name, use_iframe)
except E | xception, e:
print '\n\n===============Exception=============\n\n'+str(e)+'\n\n'
print ret
print '\n\n'
return HttpResponseServerError(content=str(e))
return json_ret
|
Boussadia/weboob | modules/societegenerale/browser.py | Python | agpl-3.0 | 4,686 | 0.001707 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword, BrowserUnavailable
from .pages.accounts_list import AccountsList, AccountHistory, CardsList
from .pages.login import LoginPage, BadLoginPage
__all__ = ['SocieteGenerale']
class SocieteGenerale(BaseBrowser):
DOMAIN_LOGIN = 'particuliers.societegenerale.fr'
CERTHASH_LOGIN = ['72b78ce0b8ffc63a6dcbf8fc375a1ab5502d5dfefcac1d00901a73f5a94e9ed5', '629873f98004aef6c42d15714ff9689fdfce04746483122417f432cd693f5007']
DOMAIN = 'particuliers.secure.societegenerale.fr'
CERTHASH = '4499ca391d0d690050d80e625fd0b16e83476fd565d8e43315c7a9c025f02b88'
PROTOCOL = 'https'
ENCODING = None # refer to the HTML encoding
PAGES = {
'https://particuliers.societegenerale.fr/.*': LoginPage,
'https://.*.societegenerale.fr//acces/authlgn.html': BadLoginPage,
'https://.*.societegenerale.fr/error403.html': BadLoginPage,
'.*restitution/cns_listeprestation.html': AccountsList,
'.*restitution/cns_listeCartes.*.html.*': CardsList,
'.*restitution/cns_detail.*\.html.*': AccountHistory,
'https://.*.societegenerale.fr/lgn/url.html.*':AccountHistory,
}
def __init__(self, *args, **kwargs):
self.lowsslcheck(self.DOMAIN_LOGIN, self.CERTHASH_LOGIN)
BaseBrowser.__init__(self, *args, **kwargs)
def home(self):
self.location('https://' + self.DOMAIN_LOGIN + '/index.html')
def is_logged(self):
if not self.page or self.is_on_page(LoginPage):
return False
error = self.page.get_error()
if error is None:
return True
return False
def login(self):
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
assert self.password.isdigit()
if not self.is_on_page(LoginPage):
self.location('https://' + self.DOMAIN_LOGIN + '/index.html', no_login=True)
self.page.login(self.username, self.password)
if self.is_on_page(LoginPage):
raise BrowserIncorrectPassword()
if self.is_on_page(BadLoginPage):
error = self.page.get_error()
if error is None:
raise BrowserIncorrectPassword()
elif error.startswith('Votre session a'):
raise BrowserUnavailable('Session has expired')
elif error.startswith('Le service est momentan'):
raise BrowserUnavailable(error)
else:
raise BrowserIncorrectPassword(error)
def get_accounts_list(self):
if not self.is_on_page(AccountsList):
self.location('/restitution/cns_listeprestation.html')
return self.page.get_list()
def get_account(self, id):
assert isinstance(id, basestring)
if not self.is_on_page(AccountsList):
self.location('/restitution/cns_listeprestation.html')
for a in self.page.get_list():
if a.id == id:
return a
return None
def iter_history(self, account):
self.location(account._link_id)
transactions = []
if self.is_on_page( | CardsList):
for card_link in self.page.iter_cards():
self.location(card_link)
transactions += list(self.page.iter_transactions(coming=True))
elif self.is_on_page(AccountHistory):
transactions += list(self.page.iter_transactions(coming=(accoun | t.type == account.TYPE_CARD)))
else:
self.logger.warning('This account is not supported')
def key(tr):
# Can't compare datetime and date, so cast them.
try:
return tr.rdate.date()
except AttributeError:
return tr.rdate
transactions.sort(key=key, reverse=True)
return iter(transactions)
|
we-inc/mms-snow-white-and-the-seven-pandas | webserver/apps/users/migrations/0001_initial.py | Python | mit | 2,969 | 0.004715 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-27 19:05
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: create the project's custom ``User`` model.

    Mirrors Django's stock auth-user fields and adds required
    email/first_name/last_name plus a phone number.
    """

    initial = True

    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.EmailField(max_length=254, verbose_name='Email')),
                ('first_name', models.CharField(max_length=50, verbose_name='First Name')),
                ('last_name', models.CharField(max_length=50, verbose_name='Last Name')),
                ('phone', models.CharField(max_length=20, verbose_name='Phone')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
open-telemetry/opentelemetry-python | exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/__init__.py | Python | apache-2.0 | 2,513 | 0.000398 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This library allows to export tracing data to an OTLP collector.
Usage
-----
The **OTLP Span Exporter** allows to export `OpenTelemetry`_ traces to the
`OTLP`_ collector.
You can configure the exporter with the following environment variables:
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
- :envvar:`OTEL_EXPOR | TER_OTLP_TRACES_CERTIFICATE`
- :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT`
- :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL`
- :envvar:`OTEL_EXPORTER_OTLP_HEADERS`
- :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT`
- :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION`
- :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE`
.. _OTLP: https://github.com/open-telemetry/opentelemetry-collector/
.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/
.. code:: python
from opentelemetry | import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
# Resource can be required for some backends, e.g. Jaeger
# If resource wouldn't be set - traces wouldn't appears in Jaeger
resource = Resource(attributes={
"service.name": "service"
})
trace.set_tracer_provider(TracerProvider(resource=resource))
tracer = trace.get_tracer(__name__)
otlp_exporter = OTLPSpanExporter()
span_processor = BatchSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)
with tracer.start_as_current_span("foo"):
print("Hello world!")
API
---
"""
import enum
class Compression(enum.Enum):
    """Payload compression algorithms supported by the OTLP/HTTP exporter."""

    NoCompression = "none"
    Deflate = "deflate"
    Gzip = "gzip"
|
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/forms/widgets.py | Python | bsd-3-clause | 35,106 | 0.002193 | """
HTML Widget classes
"""
from __future__ import absolute_import, unicode_literals
import copy
import datetime
from itertools import chain
try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django.conf import settings
from django.forms.util import flatatt, to_current_timezone
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.html import conditional_escape, format_html, format_html_join
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import datetime_safe, formats, six
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'PasswordInput',
'HiddenInput', 'MultipleHiddenInput', 'ClearableFileInput',
'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput',
'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget',
'SplitDateTimeWidget',
)
MEDIA_TYPES = ('css','js')
@python_2_unicode_compatible
class Media(object):
    """Collects the CSS/JS asset definitions of a widget or form and
    renders the corresponding ``<link>``/``<script>`` tags."""

    def __init__(self, media=None, **kwargs):
        # Accept either an inner "class Media" definition object or
        # explicit css=/js= keyword arguments.
        if media:
            media_attrs = media.__dict__
        else:
            media_attrs = kwargs

        self._css = {}
        self._js = []

        for name in MEDIA_TYPES:
            getattr(self, 'add_' + name)(media_attrs.get(name, None))

        # Any leftover attributes must be invalid.
        # if media_attrs != {}:
        #     raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys()))

    def __str__(self):
        return self.render()

    def render(self):
        # Concatenate the rendered tags for every media type (css then js).
        return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))

    def render_js(self):
        return [format_html('<script type="text/javascript" src="{0}"></script>', self.absolute_path(path)) for path in self._js]

    def render_css(self):
        # To keep rendering order consistent, we can't just iterate over items().
        # We need to sort the keys, and iterate over the sorted list.
        media = sorted(self._css.keys())
        return chain(*[
            [format_html('<link href="{0}" type="text/css" media="{1}" rel="stylesheet" />', self.absolute_path(path), medium)
             for path in self._css[medium]]
            for medium in media])

    def absolute_path(self, path, prefix=None):
        # Absolute URLs and root-relative paths are returned untouched.
        if path.startswith(('http://', 'https://', '/')):
            return path
        if prefix is None:
            if settings.STATIC_URL is None:
                # backwards compatibility
                prefix = settings.MEDIA_URL
            else:
                prefix = settings.STATIC_URL
        return urljoin(prefix, path)

    def __getitem__(self, name):
        "Returns a Media object that only contains media of the given type"
        if name in MEDIA_TYPES:
            return Media(**{str(name): getattr(self, '_' + name)})
        raise KeyError('Unknown media type "%s"' % name)

    def add_js(self, data):
        # De-duplicate while preserving insertion order.
        if data:
            for path in data:
                if path not in self._js:
                    self._js.append(path)

    def add_css(self, data):
        # Same de-duplication, but keyed per CSS medium ("all", "print", ...).
        if data:
            for medium, paths in data.items():
                for path in paths:
                    if not self._css.get(medium) or path not in self._css[medium]:
                        self._css.setdefault(medium, []).append(path)

    def __add__(self, other):
        # Merging two Media objects unions their css/js asset lists.
        combined = Media()
        for name in MEDIA_TYPES:
            getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
            getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
        return combined
def media_property(cls):
    """Build the ``media`` property for *cls*, merging the class's own
    inner ``Media`` definition with the one inherited from its superclass."""
    def _media(self):
        # Get the media property of the superclass, if it exists
        sup_cls = super(cls, self)
        try:
            base = sup_cls.media
        except AttributeError:
            base = Media()

        # Get the media definition for this class
        definition = getattr(cls, 'Media', None)
        if definition:
            extend = getattr(definition, 'extend', True)
            if extend:
                if extend == True:
                    # Literal True: inherit every media type from the base.
                    # (The == True check is deliberate: extend may instead be
                    # an iterable of media type names, which is truthy too.)
                    m = base
                else:
                    m = Media()
                    for medium in extend:
                        m = m + base[medium]
                return m + Media(definition)
            else:
                return Media(definition)
        else:
            return base
    return property(_media)
class MediaDefiningClass(type):
    "Metaclass for classes that can have media definitions"
    def __new__(cls, name, bases, attrs):
        new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases,
                                                           attrs)
        # Only synthesize the ``media`` property when the class body did not
        # define one explicitly.
        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        return new_class
@python_2_unicode_compatible
class SubWidget(object):
    """
    Some widgets are made of multiple HTML elements -- namely, RadioSelect.
    This is a class that represents the "inner" HTML element of a widget.
    """
    def __init__(self, parent_widget, name, value, attrs, choices):
        self.parent_widget = parent_widget
        self.name, self.value = name, value
        self.attrs, self.choices = attrs, choices

    def __str__(self):
        # Rendering is delegated to the parent widget; choices are only
        # forwarded when present.
        args = [self.name, self.value, self.attrs]
        if self.choices:
            args.append(self.choices)
        return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
is_hidden = False # Determines whether this corresponds to an <input type="hidden">.
needs_multipart_form = False # Determines does this widget need multipart form
is_localized = False
is_required = False
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
    def __deepcopy__(self, memo):
        # Shallow-copy the widget but give the copy its own attrs dict so
        # per-instance attribute changes don't leak between copies.
        obj = copy.copy(self)
        obj.attrs = self.attrs.copy()
        memo[id(self)] = obj
        return obj
    def subwidgets(self, name, value, attrs=None, choices=()):
        """
        Yields all "subwidgets" of this widget. Used only by RadioSelect to
        allow template access to individual <input type="radio"> buttons.

        Arguments are the same as for render().
        """
        # The base widget is a single element, hence a single SubWidget.
        yield SubWidget(self, name, value, attrs, choices)
    def render(self, name, value, attrs=None):
        """
        Returns this Widget rendered as HTML, as a Unicode string.

        The 'value' given is not guaranteed to be valid input, so subclass
        implementations should program defensively.
        """
        # Subclasses must override; the base widget has no HTML form.
        raise NotImplementedError
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name, None)
def _has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or inital value we get
# is None, replace it w/ ''.
if data is None:
data_value = ''
else:
data_value = data
if initial is None:
initial_value = ''
else:
initial_value = initial
if force_text(initial_value) != force_text(data_value):
return True
return False
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the fiel |
hackcyprus/jobber | schema/versions/26e3340ce799_create_categories_ta.py | Python | mit | 961 | 0.001041 | """create categories table
Revision ID: 26e3340ce799
Revi | ses: 34a1034caa3b
Create Date: 2013-11-02 10:46:23.617628
"""
from alembic import op
import sqlalchemy as sa
import json
revision = '26e3340ce799'
down | _revision = '34a1034caa3b'
# Load categories fixture file.
# TODO: create an abstraction for this.
with open('schema/initial/categories.json') as fixture:
categories = json.loads(fixture.read())
def upgrade():
    """Creates and populates the `categories` table."""
    columns = (
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('created', sa.DateTime(timezone=True)),
        sa.Column('name', sa.Unicode(50), nullable=False),
        sa.Column('slug', sa.Unicode(125), nullable=False, unique=True, index=True)
    )
    op.create_table('categories', *columns)
    # Seed the table from the JSON fixture loaded at module import time.
    op.bulk_insert(sa.sql.table('categories', *columns), categories)
def downgrade():
    """Drops the `categories` table."""
    # Fixed docstring: it previously claimed to drop the `jobs` table,
    # but this migration only manages `categories`.
    op.drop_table('categories')
|
trep/opentrep | gui/django/webapps/opentrep_w_db/api/handlers.py | Python | lgpl-2.1 | 1,494 | 0.031459 | from piston.handler import BaseHandler
import json
#
class TrepHandler (BaseHandler):
    """Piston handler answering 'display place' requests.

    The airport code is resolved with the following precedence:
    URL path parameter > ``ac`` query-string parameter > default.
    """

    # Default airport, used when the request specifies none.
    DEFAULT_AIRPORT_CODE = 'LHR'

    def read(self, request, airportCodeURL=None):
        """Return the 'display' action payload for the requested airport.

        :param request: HTTP request; may carry an ``ac`` query parameter
            (e.g. http://localhost:8000/api/display/place?ac=LAX).
        :param airportCodeURL: airport code embedded in the URL
            (e.g. http://localhost:8000/api/display/place/SFO); it takes
            precedence over the query-string parameter, so
            /api/display/place/RIO?ac=FRA resolves to RIO... actually to the
            URL value.
        """
        airportCode = self.DEFAULT_AIRPORT_CODE
        # Query-string parameter, if provided.
        airportCodeQuery = request.GET.get('ac')
        if airportCodeQuery:
            airportCode = airportCodeQuery
        # The URL parameter has precedence over the query string.
        if airportCodeURL:
            airportCode = airportCodeURL
        # TODO: do not harcode the answer. Actually search for the airport instead.
        # (Removed leftover debug prints and a redundant json dumps/loads
        # round-trip; the returned structure is unchanged.)
        return {'display': {'airport_code': airportCode}}
|
pkimber/crm | example_crm/base.py | Python | apache-2.0 | 6,691 | 0 | # -*- encoding: utf-8 -*-
""" Django settings """
from django.core.urlresolvers import reverse_lazy
DEBUG = True
TESTING = False
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
ADMINS = (
('admin', 'code@pkimber.net'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = 'media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'web_static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'w@t8%tdwyi-n$u_s#4_+cwnq&6)1n)l3p-qe(ziala0j^vo12d'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'reversion.middleware.RevisionMiddleware',
)
ROOT_URLCONF = 'example_crm.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example_crm.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'string_if_invalid': '**** INVALID EXPRESSION: %s ****',
},
},
]
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
# 'debug_toolbar',
# 'django_extensions',
# 'mptt',
'easy_thumbnails',
'taggit',
'rest_framework',
# http://www.django-rest-framework.org/api-guide/authentication#tokenauthentication
'rest_framework.authtoken',
'reversion',
)
LOCAL_APPS = (
'base',
'block',
'contact',
'crm',
'example_crm',
'finance',
'invoice',
'login',
'mail',
'report',
'stock',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# URL where requests are redirected after login when the contrib.auth.login
# view gets no next parameter.
LOGIN_REDIRECT_URL = reverse_lazy('project.dash')
CONTACT_MODEL = 'contact.Contact'
# Django debug toolbar
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'INTERC | EPT_REDIRECTS': False,
'ENABLE_STACKTRACES': True,
}
# http://www.django-rest-framework.org/api-guide/authentication#tokenauthentication
REST_FRAMEWORK = {
'COERCE_DECIMAL_TO_STRING': True,
# not sure if this is required or not
# | 'DATETIME_FORMAT': '%Y%m%dT%H%M%SZ',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# https://github.com/johnsensible/django-sendfile
SENDFILE_BACKEND = 'sendfile.backends.development'
SENDFILE_ROOT = 'media-private'
THUMBNAIL_DEBUG = DEBUG
FTP_STATIC_DIR = None
FTP_STATIC_URL = None
|
Antelox/mhn | server/mhn/common/clio.py | Python | lgpl-2.1 | 16,259 | 0.004182 | """
Clio
Mnemosyne Client Library
ThreatStream 2014
"""
import pymongo
from dateutil.parser import parse as parse_date
from collections import Counter
from bson import ObjectId, son
import json
import datetime
class Clio():
    """
    Main interface for Clio - Mnemosyne Client Library -

    Usage:
        clio = Clio()
        sessions = clio.session.get(source_ip='5.15.15.85')
    """
    def __init__(self):
        # One MongoClient shared by every resource accessor below.
        self.client = pymongo.MongoClient()

    # Each property returns a fresh resource wrapper bound to this client.

    @property
    def session(self):
        return Session(self.client)

    @property
    def counts(self):
        return Counts(self.client)

    @property
    def session_protocol(self):
        return SessionProtocol(self.client)

    @property
    def hpfeed(self):
        return HpFeed(self.client)

    @property
    def authkey(self):
        return AuthKey(self.client)

    @property
    def url(self):
        return Url(self.client)

    @property
    def file(self):
        return File(self.client)

    @property
    def dork(self):
        return Dork(self.client)

    @property
    def metadata(self):
        return Metadata(self.client)
class ResourceMixin(object):
    """Shared behaviour for Mnemosyne resources.

    Subclasses define ``collection_name`` and ``expected_filters`` (the
    mongo fields accepted as query filters) and inherit generic
    get/delete/count plumbing.
    """

    db_name = 'mnemosyne'
    expected_filters = ('_id',)

    def __init__(self, client=None, **kwargs):
        self.client = client
        # Mirror every expected filter as an instance attribute
        # (None when not supplied).
        for attr in self.__class__.expected_filters:
            setattr(self, attr, kwargs.get(attr))

    def __call__(self, *args, **kwargs):
        # Calling the resource is a shortcut for get().
        return self.get(*args, **kwargs)

    @classmethod
    def _clean_query(cls, dirty):
        """Keep only the filters this resource understands.

        NOTE(review): falsy filter values (0, '') are dropped by the
        truthiness test below — presumably intentional; confirm.
        """
        clean = dict()
        for arg in cls.expected_filters:
            # Creating a query dictionary
            # with values passed in kwargs.
            if dirty.get(arg):
                clean[arg] = dirty.get(arg)
        if 'hours_ago' in dirty:
            # Translate hours_ago into a timestamp lower bound.
            clean['timestamp'] = {
                '$gte': datetime.datetime.utcnow() - datetime.timedelta(hours=int(dirty['hours_ago']))
            }
        return clean

    @classmethod
    def _clean_options(cls, opts):
        """Normalize skip/limit/order_by pagination options."""
        try:
            skip = int(opts.get('skip', 0))
        except (ValueError, TypeError):
            skip = 0
        limit = opts.get('limit', None)
        # If limit was not indicated, we'll leave it as 'None'.
        if limit:
            try:
                limit = int(limit)
            except (ValueError, TypeError):
                # Limit provided but wrong value,
                # give a default value.
                limit = 20
        order_by = opts.get('order_by', None)
        # If order_by wasn't passed, we'll return an empty dict.
        if order_by:
            # Figure out desired direction from order_by value.
            if order_by.startswith('-'):
                direction = pymongo.DESCENDING
            else:
                direction = pymongo.ASCENDING
            # Clean up direction from field name.
            order_by = order_by.replace('-', '')
            if order_by not in cls.expected_filters:
                # Clean up field is not valid.
                order_by = None
            else:
                # Returns the argumens needed by sort() call.
                order_by = (order_by, direction,)
        return skip, limit, order_by

    def new(self, **kwargs):
        # Build a fresh (unsaved) resource bound to the same client.
        return self.__class__.from_dict(kwargs, self.client)

    def to_dict(self):
        # JSON-serializable representation of the expected filter fields.
        todict = {}
        for attr in self.__class__.expected_filters:
            todict[attr] = getattr(self, attr)
            if isinstance(todict[attr], datetime.datetime):
                todict[attr] = todict[attr].isoformat()
        # Making sure dict is json serializable.
        todict['_id'] = str(todict['_id'])
        return todict

    def get(self, options={}, **kwargs):
        """Fetch one document (by _id) or a generator of matches.

        Raises ValueError when the resource has no client attached.
        """
        if self.client is None:
            raise ValueError
        else:
            if '_id' in kwargs:
                # Direct primary-key lookup returns a single object.
                kwargs['_id'] = ObjectId(kwargs['_id'])
                return self.__class__.from_dict(
                        self.collection.find_one(kwargs), self.client)
            query = self.__class__._clean_query(kwargs)
            queryset = self.collection.find(query)
            if options:
                skip, limit, order_by = self.__class__._clean_options(options)
                if skip:
                    queryset = queryset.skip(skip)
                if limit:
                    queryset = queryset.limit(limit)
                if order_by:
                    queryset = queryset.sort(*order_by)
            return (self.__class__.from_dict(f, self.client) for f in queryset)

    def delete(self, **kwargs):
        """Remove documents matching kwargs, or this instance by _id."""
        query = dict()
        if kwargs:
            query = self.__class__._clean_query(kwargs)
        elif self._id:
            query = {'_id': self._id}
        else:
            # Need to be at least a valid resource or
            # pass keyword arguments.
            return None
        return self.collection.remove(query)

    def count(self, **kwargs):
        query = self.__class__._clean_query(kwargs)
        # Just counting the results.
        return self.collection.find(query).count()

    @property
    def collection(self):
        """Shortcut for getting the appropriate collection object"""
        cls = self.__class__
        return self.client[cls.db_name][cls.collection_name]

    @classmethod
    def from_dict(cls, dict_, client=None):
        """
        Returns an object from a dictionary, most likely
        to come from pymongo results.
        """
        if dict_ is None:
            # Invalid dict incoming.
            return None
        doc = cls(client)
        attrs = dict_.keys()
        for at in attrs:
            # Set every key in dict_ as attribute in the object.
            setattr(doc, at, dict_.get(at))
        return doc
class Counts(ResourceMixin):
    """Resource wrapper for the ``counts`` collection."""

    collection_name = 'counts'
    expected_filters = ('identifier', 'date', 'event_count',)

    def get_count(self, identifier, date=None):
        """Sum the event counts recorded for *identifier* (optionally limited to *date*)."""
        query = {'identifier': identifier}
        if date:
            query['date'] = date
        total = sum(record['event_count'] for record in self.collection.find(query))
        return int(total)
class Session(ResourceMixin):
collection_name = 'session'
expected_filters = ('protocol', 'source_ip', 'source_port',
'destination_ip', 'destination_port',
'honeypot', 'timestamp', '_id', 'identifier',)
@classmethod
def _clean_query(cls, dirty):
clean = super(Session, cls)._clean_query(dirty)
def date_to_datetime(d):
return datetime.datetime.combine(d, datetime.datetime.min.time())
def clean_integer(field_name, query):
# Integer fields in mongo need to be int type, GET queries
# are passed as str so this method converts the str to
# integer so the find() call matches properly.
# If it's not a proper integer it will be remove
# from the query.
try:
integer = int(query[field_name])
except (ValueError, TypeError):
query.pop(field_name)
else:
query[field_name] = integer
finally:
return query
intfields = ('destination_port', 'source_port',)
for field in intfields:
if field in clean.copy():
clean = clean_integer(field, clean)
if 'timestamp' in clean and isinstance(clean['timestamp'], basestring):
# Transforms timestamp queries into
# timestamp_lte queries.
try:
timestamp = parse_date(clean.pop('timestamp'))
except (ValueError, TypeError):
pass
else:
clean['timestamp'] = {
'$gte': date_to_datetime(timestamp.date()),
'$lt': date_to_datetime(timestamp.date() + datetime.timedelta(days=1))
}
return clean
def _tops(self, fields, top=5, hours_ago=None, **kwargs):
if isinstance(fields, basestring):
fields = [fields,]
match_query = dict([ (field, {'$ne': None}) for field in fields ])
for name, value in kwargs.items():
|
pydanny/dj-paginator | example_project/example_project/settings.py | Python | bsd-3-clause | 2,669 | 0 | """
Django settings for example_project project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE-ME--THIS-IS-JUST-AN-EXAMPLE-PROJECT-FOR-TESTING'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'example_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example_proj | ect.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': | os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
edwardbadboy/vdsm-ubuntu | tests/netinfoTests.py | Python | gpl-2.0 | 7,176 | 0 | #
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
from shutil import rmtree
import tempfile
from xml.dom import minidom
import ethtool
from vdsm import netinfo
from monkeypatch import MonkeyPatch, MonkeyPatchScope
from testrunner import VdsmTestCase as TestCaseBase
# speeds defined in ethtool
ETHTOOL_SPEEDS = set([10, 100, 1000, 2500, 10000])
class TestNetinfo(TestCaseBase):
    def testNetmaskConversions(self):
        """prefix2netmask() must agree with the fixture file and reject
        prefixes outside the valid 0-32 range."""
        path = os.path.join(os.path.dirname(__file__), "netmaskconversions")
        with open(path) as netmaskFile:
            for line in netmaskFile:
                # Skip fixture comments.
                if line.startswith('#'):
                    continue
                bitmask, address = [value.strip() for value in line.split()]
                self.assertEqual(netinfo.prefix2netmask(int(bitmask)),
                                 address)
        self.assertRaises(ValueError, netinfo.prefix2netmask, -1)
        self.assertRaises(ValueError, netinfo.prefix2netmask, 33)
    def testSpeedInvalidNic(self):
        """speed() must return 0 for a device that does not exist."""
        nicName = 'DUMMYNICDEVNAME'
        self.assertTrue(nicName not in netinfo.nics())
        s = netinfo.speed(nicName)
        self.assertEqual(s, 0)
    def testSpeedInRange(self):
        """Every reported speed must be 0 or one of the ethtool-defined values."""
        for d in netinfo.nics():
            s = netinfo.speed(d)
            self.assertFalse(s < 0)
            self.assertTrue(s in ETHTOOL_SPEEDS or s == 0)
def testIntToAddress(self):
num = [0, 1, 16777344, 16777408, 4294967295]
ip = ["0.0.0.0", "1.0.0.0", "128.0.0.1",
"192.0.0.1", "255.255.255.255"]
for n, addr in zip(num, ip):
self.assertEqual(addr, netinfo.intToAddress(n))
    def testIPv6StrToAddress(self):
        """ipv6StrToAddress() must turn 32-hex-digit strings into
        canonical (compressed) IPv6 notation."""
        inputs = [
            '00000000000000000000000000000000',
            '00000000000000000000000000000001',
            '20010db8000000000001000000000002',
            '20010db8aaaabbbbccccddddeeeeffff',
            'fe80000000000000be305bbffec58446']
        ip = [
            '::',
            '::1',
            '2001:db8::1:0:0:2',
            '2001:db8:aaaa:bbbb:cccc:dddd:eeee:ffff',
            'fe80::be30:5bbf:fec5:8446']
        for s, addr in zip(inputs, ip):
            self.assertEqual(addr, netinfo.ipv6StrToAddress(s))
    @MonkeyPatch(netinfo, 'networks', lambda: {'fake': {'bridged': True}})
    def testGetNonExistantBridgeInfo(self):
        """netinfo.get() must tolerate a configured-but-missing bridge."""
        # Getting info of non existing bridge should not raise an exception,
        # just log a traceback. If it raises an exception the test will fail as
        # it should.
        netinfo.get()
    def testMatchNicName(self):
        """_match_name() must report a hit when the name matches a candidate."""
        self.assertTrue(netinfo._match_name('test1', ['test0', 'test1']))
def testIPv4toMapped(self):
self.assertEqual('::ffff:127.0.0.1', netinfo.IPv4toMapped('127.0.0.1'))
def testGetIfaceByIP(self):
for dev in ethtool.get_interfaces_info(ethtool.get_active_devices()):
ipaddrs = map(
lambda etherinfo_ipv6addr: etherinfo_ipv6addr.address,
dev.get_ipv6_addresses())
ipaddrs.append(dev.ipv4_address)
for ip in ipaddrs:
self.assertEqual(dev.device, netinfo.getIfaceByIP(ip))
def _dev_dirs_setup(self, dir_fixture):
"""
Creates test fixture which is a dir structure:
em, me, fake0, fake1 devices that should managed by vdsm.
hid0, hideons not managed by being hidden nics.
jbond not managed by being hidden bond.
me0, me1 not managed by being nics enslaved to jbond hidden bond.
/tmp/.../em/device
/tmp/.../me/device
/tmp/.../fake0
/tmp/.../fake
/tmp/.../hid0/device
/tmp/.../hideous/device
/tmp/.../me0/device
/tmp/.../me1/device
returns related containing dir.
"""
dev_dirs = [os.path.join(dir_fixture, dev) for dev in
('em/device', 'me/device', 'fake', 'fake0',
'hid/device', 'hideous/device',
'me0/device', 'me1/device')]
for dev_dir in dev_dirs:
os.makedirs(dev_dir)
| bonding_path = os.path.join(di | r_fixture, 'jbond/bonding')
os.makedirs(bonding_path)
with open(os.path.join(bonding_path, 'slaves'), 'w') as f:
f.write('me0 me1')
return dir_fixture
def _config_setup(self):
"""
Returns an instance of a config stub.
With patterns:
* hid* for hidden nics.
* fake* for fake nics.
* jb* for hidden bonds.
"""
class Config(object):
def get(self, unused_vars, key):
if key == 'hidden_nics':
return 'hid*'
elif key == 'fake_nics':
return 'fake*'
else:
return 'jb*'
return Config()
def testNics(self):
temp_dir = tempfile.mkdtemp()
with MonkeyPatchScope([(netinfo, 'BONDING_SLAVES',
temp_dir + '/%s/bonding/slaves'),
(netinfo, 'NET_PATH',
self._dev_dirs_setup(temp_dir)),
(netinfo, 'config', self._config_setup())]):
try:
self.assertEqual(set(netinfo.nics()),
set(['em', 'me', 'fake', 'fake0']))
finally:
rmtree(temp_dir)
def testGetBandwidthQos(self):
notEmptyDoc = minidom.parseString("""<bandwidth>
<inbound average='4500' burst='5400' />
<outbound average='4500' burst='5400' peak='101' />
</bandwidth>""")
expectedQosNotEmpty = netinfo._Qos(inbound={'average': '4500',
'burst': '5400',
'peak': ''},
outbound={'average': '4500',
'burst': '5400',
'peak': '101'})
emptyDoc = minidom.parseString("<whatever></whatever>")
self.assertEqual(expectedQosNotEmpty,
netinfo._parseBandwidthQos(notEmptyDoc))
self.assertEqual(netinfo._Qos('', ''),
netinfo._parseBandwidthQos(emptyDoc))
|
salv-orlando/MyRepo | nova/tests/api/ec2/test_middleware.py | Python | apache-2.0 | 4,748 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
import webob.dec
import webob.exc
from nova.api import ec2
from nova import context
from nova import exception
from nova import flags
from nova import test
from nova import utils
from xml.etree.ElementTree import fromstring as xml_to_tree
FLAGS = flags.FLAGS
@webob.dec.wsgify
def conditional_forbid(req):
"""Helper wsgi app returns 403 if param 'die' is 1."""
if 'die' in req.params and req.params['die'] == '1':
raise webob.exc.HTTPForbidden()
return 'OK'
class LockoutTestCase(test.TestCase):
"""Test case for the Lockout middleware."""
def setUp(self): # pylint: disable=C0103
super(LockoutTestCase, self).setUp()
utils.set_time_override()
self.lockout = ec2.Lockout(conditiona | l_forbid)
def tearDown(self): # pylint: disable=C0103
utils.clear_time_override()
super(LockoutTe | stCase, self).tearDown()
def _send_bad_attempts(self, access_key, num_attempts=1):
"""Fail x."""
for i in xrange(num_attempts):
req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key)
self.assertEqual(req.get_response(self.lockout).status_int, 403)
def _is_locked_out(self, access_key):
"""Sends a test request to see if key is locked out."""
req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key)
return (req.get_response(self.lockout).status_int == 403)
def test_lockout(self):
self._send_bad_attempts('test', FLAGS.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
def test_timeout(self):
self._send_bad_attempts('test', FLAGS.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
utils.advance_time_seconds(FLAGS.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
self._send_bad_attempts('test1', FLAGS.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
utils.advance_time_seconds(FLAGS.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
utils.advance_time_seconds(FLAGS.lockout_window * 60)
self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
class ExecutorTestCase(test.TestCase):
def setUp(self):
super(ExecutorTestCase, self).setUp()
self.executor = ec2.Executor()
def _execute(self, invoke):
class Fake(object):
pass
fake_ec2_request = Fake()
fake_ec2_request.invoke = invoke
fake_wsgi_request = Fake()
fake_wsgi_request.environ = {
'nova.context': context.get_admin_context(),
'ec2.request': fake_ec2_request,
}
return self.executor(fake_wsgi_request)
def _extract_message(self, result):
tree = xml_to_tree(result.body)
return tree.findall('./Errors')[0].find('Error/Message').text
def test_instance_not_found(self):
def not_found(context):
raise exception.InstanceNotFound(instance_id=5)
result = self._execute(not_found)
self.assertIn('i-00000005', self._extract_message(result))
def test_snapshot_not_found(self):
def not_found(context):
raise exception.SnapshotNotFound(snapshot_id=5)
result = self._execute(not_found)
self.assertIn('snap-00000005', self._extract_message(result))
def test_volume_not_found(self):
def not_found(context):
raise exception.VolumeNotFound(volume_id=5)
result = self._execute(not_found)
self.assertIn('vol-00000005', self._extract_message(result))
|
zedshaw/learn-python3-thw-code | ex52/gothonweb/gothonweb/planisphere.py | Python | mit | 4,537 | 0.002425 |
class Room(object):
def __init__(self, name, description):
self.name = name
self.description = description
self.paths = {}
def go(self, direction):
return self.paths.get(direction, None)
def add_paths(self, paths):
self.paths.update(paths)
central_corridor = Room("Central Corridor",
"""
The | Gothons of Planet Percal #25 have invaded your ship and destroyed
your entire crew. You are the last surviving member and your last
mission is to get the neutron destruct bomb from the Weapons Armory,
put it in the bridge, and blow the ship up after getting i | nto an
escape pod.
You're running down the central corridor to the Weapons Armory when
a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume
flowing around his hate filled body. He's blocking the door to the
Armory and about to pull a weapon to blast you.
""")
laser_weapon_armory = Room("Laser Weapon Armory",
"""
Lucky for you they made you learn Gothon insults in the academy.
You tell the one Gothon joke you know:
Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr.
The Gothon stops, tries not to laugh, then busts out laughing and can't move.
While he's laughing you run up and shoot him square in the head
putting him down, then jump through the Weapon Armory door.
You do a dive roll into the Weapon Armory, crouch and scan the room
for more Gothons that might be hiding. It's dead quiet, too quiet.
You stand up and run to the far side of the room and find the
neutron bomb in its container. There's a keypad lock on the box
and you need the code to get the bomb out. If you get the code
wrong 10 times then the lock closes forever and you can't
get the bomb. The code is 3 digits.
""")
the_bridge = Room("The Bridge",
"""
The container clicks open and the seal breaks, letting gas out.
You grab the neutron bomb and run as fast as you can to the
bridge where you must place it in the right spot.
You burst onto the Bridge with the netron destruct bomb
under your arm and surprise 5 Gothons who are trying to
take control of the ship. Each of them has an even uglier
clown costume than the last. They haven't pulled their
weapons out yet, as they see the active bomb under your
arm and don't want to set it off.
""")
escape_pod = Room("Escape Pod",
"""
You point your blaster at the bomb under your arm
and the Gothons put their hands up and start to sweat.
You inch backward to the door, open it, and then carefully
place the bomb on the floor, pointing your blaster at it.
You then jump back through the door, punch the close button
and blast the lock so the Gothons can't get out.
Now that the bomb is placed you run to the escape pod to
get off this tin can.
You rush through the ship desperately trying to make it to
the escape pod before the whole ship explodes. It seems like
hardly any Gothons are on the ship, so your run is clear of
interference. You get to the chamber with the escape pods, and
now need to pick one to take. Some of them could be damaged
but you don't have time to look. There's 5 pods, which one
do you take?
""")
the_end_winner = Room("The End",
"""
You jump into pod 2 and hit the eject button.
The pod easily slides out into space heading to
the planet below. As it flies to the planet, you look
back and see your ship implode then explode like a
bright star, taking out the Gothon ship at the same
time. You won!
""")
the_end_loser = Room("The End",
"""
You jump into a random pod and hit the eject button.
The pod escapes out into the void of space, then
implodes as the hull ruptures, crushing your body
into jam jelly.
"""
)
escape_pod.add_paths({
'2': the_end_winner,
'*': the_end_loser
})
generic_death = Room("death", "You died.")
the_bridge.add_paths({
'throw the bomb': generic_death,
'slowly place the bomb': escape_pod
})
laser_weapon_armory.add_paths({
'0132': the_bridge,
'*': generic_death
})
central_corridor.add_paths({
'shoot!': generic_death,
'dodge!': generic_death,
'tell a joke': laser_weapon_armory
})
START = 'central_corridor'
def load_room(name):
"""
There is a potential security problem here.
Who gets to set name? Can that expose a variable?
"""
return globals().get(name)
def name_room(room):
"""
Same possible security problem. Can you trust room?
What's a better solution than this globals lookup?
"""
for key, value in globals().items():
if value == room:
return key
|
ghostwords/localore | localore/home/migrations/0024_homepagefeaturedpage.py | Python | mpl-2.0 | 1,191 | 0.004198 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('home', '0023_ | remove_homepage_live_feed_intro'),
]
operations = [
migrations.CreateModel(
name='HomePageFeaturedPage',
fields=[
('id', mo | dels.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('sort_order', models.IntegerField(blank=True, null=True, editable=False)),
('title', models.CharField(blank=True, max_length=255, help_text='Leave blank to use page title.')),
('subtitle', models.CharField(max_length=255)),
('featured_page', models.ForeignKey(verbose_name='page to feature', to='wagtailcore.Page', related_name='+')),
('home_page', modelcluster.fields.ParentalKey(to='home.HomePage', related_name='featured_pages')),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
),
]
|
Ruide/angr-dev | angr/angr/factory.py | Python | bsd-2-clause | 18,703 | 0.006683 | from .sim_state import SimState
from .calling_conventions import DEFAULT_CC, SimRegArg, SimStackArg, PointerWrapper
from .callable import Callable
import logging
l = logging.getLogger("angr.factory")
_deprecation_cache = set()
def deprecate(name, replacement):
def wrapper(func):
def inner(*args, **kwargs):
if name not in _deprecation_cache:
l.warning("factory.%s is deprecated! Please use factory.%s instead.", name, replacement)
_deprecation_cache.add(name)
return func(*args, **kwargs)
return inner
return wrapper
class AngrObjectFactory(object):
"""
This factory provides access to important analysis elements.
"""
def __init__(self, project, default_engine, procedure_engine, engines):
# currently the default engine MUST be a vex engine... this assumption is hardcoded
# but this can totally be changed with some interface generalization
self._project = project
self._default_cc = DEFAULT_CC[project.arch.name]
self.default_engine = default_engine
self.procedure_engine = procedure_engine
self.engines = engines
def snippet(self, addr, jumpkind=None, **block_opts):
if self._project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
hook = self._project._sim_procedures[addr]
size = hook.kwargs.get('length', 0)
return H | ookNode(addr, size, self._project.hooked_by(addr))
else:
return self.block(addr, **block_opts).codenode # pylint: disable=no-member
def successors(self, state,
ad | dr=None,
jumpkind=None,
inline=False,
default_engine=False,
engines=None,
**kwargs):
"""
Perform execution using any applicable engine. Enumerate the current engines and use the
first one that works. Return a SimSuccessors object classifying the results of the run.
:param state: The state to analyze
:param addr: optional, an address to execute at instead of the state's ip
:param jumpkind: optional, the jumpkind of the previous exit
:param inline: This is an inline execution. Do not bother copying the state.
:param default_engine: Whether we should only attempt to use the default engine (usually VEX)
:param engines: A list of engines to try to use, instead of the default.
Additional keyword arguments will be passed directly into each engine's process method.
"""
if default_engine:
engines = [self.default_engine]
if engines is None:
engines = self.engines
if addr is not None or jumpkind is not None:
state = state.copy()
if addr is not None:
state.ip = addr
if jumpkind is not None:
state.history.jumpkind = jumpkind
r = None
for engine in engines:
if engine.check(state, inline=inline, **kwargs):
r = engine.process(state, inline=inline,**kwargs)
if r.processed:
break
if r is None or not r.processed:
raise AngrExitError("All engines failed to execute!")
# Peek and fix the IP for syscalls
if r.successors and r.successors[0].history.jumpkind.startswith('Ijk_Sys'):
self._fix_syscall_ip(r.successors[0])
# fix up the descriptions... TODO do something better than this
description = str(r)
l.info("Ticked state: %s", description)
for succ in r.successors:
succ.history.description = description
return r
def blank_state(self, **kwargs):
"""
Returns a mostly-uninitialized state object. All parameters are optional.
:param addr: The address the state should start at instead of the entry point.
:param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
prefixed by this string.
:param fs: A dictionary of file names with associated preset SimFile objects.
:param concrete_fs: bool describing whether the host filesystem should be consulted when opening files.
:param chroot: A path to use as a fake root directory, Behaves similarly to a real chroot. Used only
when concrete_fs is set to True.
:param kwargs: Any additional keyword args will be passed to the SimState constructor.
:return: The blank state.
:rtype: SimState
"""
return self._project._simos.state_blank(**kwargs)
def entry_state(self, **kwargs):
"""
Returns a state object representing the program at its entry point. All parameters are optional.
:param addr: The address the state should start at instead of the entry point.
:param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
prefixed by this string.
:param fs: a dictionary of file names with associated preset SimFile objects.
:param concrete_fs: boolean describing whether the host filesystem should be consulted when opening files.
:param chroot: a path to use as a fake root directory, behaves similar to a real chroot. used only when
concrete_fs is set to True.
:param argc: a custom value to use for the program's argc. May be either an int or a bitvector. If
not provided, defaults to the length of args.
:param args: a list of values to use as the program's argv. May be mixed strings and bitvectors.
:param env: a dictionary to use as the environment for the program. Both keys and values may be
mixed strings and bitvectors.
:return: The entry state.
:rtype: SimState
"""
return self._project._simos.state_entry(**kwargs)
def full_init_state(self, **kwargs):
"""
Very much like :meth:`entry_state()`, except that instead of starting execution at the program entry point,
execution begins at a special SimProcedure that plays the role of the dynamic loader, calling each of the
initializer functions that should be called before execution reaches the entry point.
:param addr: The address the state should start at instead of the entry point.
:param initial_prefix: If this is provided, all symbolic registers will hold symbolic values with names
prefixed by this string.
:param fs: a dictionary of file names with associated preset SimFile objects.
:param concrete_fs: boolean describing whether the host filesystem should be consulted when opening files.
:param chroot: a path to use as a fake root directory, behaves similar to a real chroot. used only when
concrete_fs is set to True.
:param argc: a custom value to use for the program's argc. May be either an int or a bitvector. If
not provided, defaults to the length of args.
:param args: a list of values to use as arguments to the program. May be mixed strings and bitvectors.
:param env: a dictionary to use as the environment for the program. Both keys and values may be
mixed strings and bitvectors.
:return: The fully initialized state.
:rtype: SimState
"""
return self._project._simos.state_full_init(**kwargs)
def call_state(self, addr, *args, **kwargs):
"""
Returns a state object initialized to the start of a given function, as if it were called with given parameters.
|
tokuhirom/ycmd | ycmd/handlers.py | Python | gpl-3.0 | 7,766 | 0.039403 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from os import path
try:
import ycm_core
except ImportError as e:
raise RuntimeError(
'Error importing ycm_core. Are you sure you have placed a '
'version 3.2+ libclang.[so|dll|dylib] in folder "{0}"? '
'See the Installation Guide in the docs. Full error: {1}'.format(
path.realpath( path.join( path.abspath( __file__ ), '../..' ) ),
str( e ) ) )
import atexit
import logging
import json
import bottle
import httplib
from bottle import request, response
import server_state
from ycmd import user_options_store
from ycmd.responses import BuildExceptionResponse, BuildCompletionResponse
from ycmd import hmac_plugin
from ycmd import extra_conf_store
from ycmd.request_wrap import RequestWrap
# num bytes for the request body buffer; request.json only works if the request
# size is less than this
bottle.Request.MEMFILE_MAX = 1000 * 1024
_server_state = None
_hmac_secret = None
_logger = logging.getLogger( __name__ )
app = bottle.Bottle()
@app.post( '/event_notification' )
def EventNotification():
_logger.info( 'Received event notification' )
request_data = RequestWrap( request.json )
event_name = request_data[ 'event_name' ]
_logger.debug( 'Event name: %s', event_name )
event_handler = 'On' + event_name
getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )
filetypes = request_data[ 'filetypes' ]
response_data = None
if _server_state.FiletypeCompletionUsable( filetypes ):
response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),
event_handler )( request_data )
if response_data:
return _JsonResponse( response_data )
return _JsonResponse( {} )
@app.post( '/run_completer_command' )
def RunCompleterCommand():
_logger.info( 'Received command request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.OnUserCommand(
request_data[ 'command_arguments' ],
request_data ) )
@app.post( '/completions' )
def GetCompletions():
_logger.info( 'Received completion request' )
request_data = RequestWrap( request.json )
do_filetype_completion = _server_state.ShouldUseFiletypeCompleter(
request_data )
_logger.debug( 'Using filetype completion: %s', do_filetype_completion )
filetypes = request_data[ 'filetypes' ]
completer = ( _server_state.GetFiletypeCompleter( filetypes ) if
do_filetype_completion else
_server_state.GetGeneralCompleter() )
return _JsonResponse( BuildCompletionResponse(
completer.ComputeCandidates( request_data ),
request_data.CompletionStartColumn() ) )
@app.get( '/healthy' )
def GetHealthy():
_logger.info( 'Received health request' )
if request.query.include_subservers:
cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
return _JsonResponse( cs_completer.ServerIsRunning() )
return _JsonResponse( True )
@app.get( '/ready' )
def GetReady():
_logger.info( 'Received ready request' )
if request.query.include_subservers:
cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
return _JsonResponse( cs_completer.ServerIsReady() )
return _JsonResponse( True )
@app.post( '/semantic_completion_available' )
def FiletypeCompletionAvailable():
_logger.info( 'Received filetype completion available request' )
return _JsonResponse( _server_state.FiletypeCompletionAvailable(
RequestWrap( request.json )[ 'filetypes' ] ) )
@app.post( '/defined_subcommands' )
def DefinedSubcommands():
_logger.info( 'Received defined subcommands request' )
completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
return _JsonResponse( completer.DefinedSubcommands() )
@app.post( '/detailed_diagnostic' )
def GetDetailedDiagnostic():
_logger.info( 'Received detailed diagnostic request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )
@app.post( '/load_extra_conf_file' )
def LoadExtraConfFile():
_logger.info( 'Received extra conf load request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Load( request_data[ 'filepath' ], force = True )
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
_logger.info( 'Received extra conf ignore request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Disable( request_data[ 'filepath' ] )
@app.post( '/debug_info' )
def DebugInfo():
_logger.info( 'Received debug info request' )
output = []
has_clang_support = ycm_core.HasClangSupport()
output.append( 'Server has Clang support compiled in: {0}'.format(
has_clang_support ) )
if has_clang_support:
output.append( 'Clang version: ' + ycm_core.ClangVersion() )
request_data = RequestWrap( request.json )
try:
output.append(
_GetCompleterForRequestData( request_data ).DebugInfo( request_data) )
except:
pass
return _JsonResponse( '\n'.join( output ) )
# The type of the param is Bottle.HTTPError
@app.error( httplib.INTERNAL_SERVER_ERROR )
def ErrorHandler( httperror ):
body = _JsonResponse( BuildExceptionResponse( httperror.exception,
httperror.traceback ) )
hmac_plugin.SetHmacHeader( body, _hmac_secret )
return body
def _JsonResponse( data ):
response.set_header( 'Content-Type', 'application/json' )
return json.dumps( data, default = _UniversalSerialize )
def _UniversalSerialize( obj ):
try:
serialized = obj.__dict__.copy()
serialized[ 'TYPE' ] = type( obj ).__name__
return serialized
except AttributeError:
return str( obj )
def _GetCompleterForRequestData( request_data ):
completer_target = request_data.get( 'completer_ta | rget', None )
if completer_target == 'identifier':
return _server_state.GetGeneralCompleter() | .GetIdentifierCompleter()
elif completer_target == 'filetype_default' or not completer_target:
return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )
else:
return _server_state.GetFiletypeCompleter( [ completer_target ] )
@atexit.register
def ServerShutdown():
_logger.info( 'Server shutting down' )
if _server_state:
_server_state.Shutdown()
extra_conf_store.Shutdown()
def SetHmacSecret( hmac_secret ):
global _hmac_secret
_hmac_secret = hmac_secret
def UpdateUserOptions( options ):
global _server_state
if not options:
return
# This should never be passed in, but let's try to remove it just in case.
options.pop( 'hmac_secret', None )
user_options_store.SetAll( options )
_server_state = server_state.ServerState( options )
def SetServerStateToDefaults():
global _server_state, _logger
_logger = logging.getLogger( __name__ )
user_options_store.LoadDefaults()
_server_state = server_state.ServerState( user_options_store.GetAll() )
extra_conf_store.Reset()
|
williamjacksn/rainwave | api_requests/admin_web/developer.py | Python | gpl-2.0 | 4,050 | 0.021975 | from time import time as timestamp
import hashlib
from api.web import APIHandler
from api.exceptions import APIException
from api.server import handle_api_url
from libs import config
from libs import db
@handle_api_url("test/create_anon_tuned_in/(\d+)")
class CreateAnonTunedIn(APIHandler):
description = "Creates a fake tune-in record for an anonymous user at 127.0.0.1."
local_only = True
sid_required = False
auth_required = False
allow_get = True
return_name = "create_anon_tuned_in_result"
def post(self, sid): #pylint: disable=W0221
if db.c.fetch_var("SELECT COUNT(*) FROM r4_listeners WHERE listener_ip = '127.0.0.1' AND user_id = 1") == 0:
db.c.update("INSERT INTO r4_listeners (listener_ip, user_id, sid, listener_icecast_id) VALUES ('127.0.0.1', 1, %s, 1)", (int(sid),))
self.append_standard("dev_anon_user_tunein_ok", "Anonymous user tune in 127.0.0.1 record completed.")
return
if db.c.fetch_var("SELECT COUNT(*) FROM r4_listeners WHERE listener_ip = '::1' AND user_id = 1") == 0:
db.c.update("INSERT INTO r4_listeners (listener_ip, user_id, sid, listener_icecast_id) VALUES ('::1', 1, %s, 1)", (int(sid),))
self.append_standard("dev_anon_user_tunein_ok", "Anonymous user tune in ::1 record completed.")
return
if db.c.fetch_var("SELECT COUNT(*) FROM r4_listeners WHERE listener_ip = 'localhost' AND user_id = 1") == 0:
db.c.update("INSERT INTO r4_listeners (listener_ip, user_id, sid, listener_icecast_id) VALUES ('localhost', 1, %s, 1)", (int(sid),))
self.append_standard("dev_anon_user_tunein_ok", "Anonymous user tune in localhost record completed.")
return
raise APIException(500, "internal_error", "Anonymous user tune in record already exists.")
class TestUserRequest(APIHandler):
description = "Login as a user."
local_only = True
sid_required = False
auth_required = False
allow_get = True
def post(self, sid): #pylint: disable=W0221
user_id = db.c.fetch_var("SELECT MAX(user_id) FROM phpbb_users")
if user_id | and user_id < 2:
user_id = user_id + 1
db.c.update("INSERT INTO phpbb_users (username, user_id, group_id) VALUES ('Test" + str(user_id) + "', %s, 5)", (user_id,))
elif not user_id:
user_id = 2
db.c.update("INSERT INTO phpbb_users (username, user_id, group_id) | VALUES ('Test" + str(user_id) + "', %s, 5)", (user_id,))
self.set_cookie(config.get("phpbb_cookie_name") + "_u", user_id)
session_id = db.c.fetch_var("SELECT session_id FROM phpbb_sessions WHERE session_user_id = %s", (user_id,))
if not session_id:
session_id = hashlib.md5(repr(timestamp())).hexdigest()
db.c.update("INSERT INTO phpbb_sessions (session_id, session_user_id) VALUES (%s, %s)", (session_id, user_id))
self.set_cookie(config.get("phpbb_cookie_name") + "_u", user_id)
self.set_cookie(config.get("phpbb_cookie_name") + "_sid", session_id)
self.execute(user_id, sid)
self.append_standard("dev_login_ok", "You are now user ID %s session ID %s" % (user_id, session_id))
def execute(self, user_id, sid):
pass
@handle_api_url("test/login_tuned_in/(\d+)")
class CreateLoginTunedIn(TestUserRequest):
description = "Creates or uses a user account with a tuned in record and sets the appropriate cookies so you're that user."
auth_required = False
sid_required = False
return_name = "login_tuned_in_result"
def execute(self, user_id, sid):
if db.c.fetch_var("SELECT COUNT(*) FROM r4_listeners WHERE user_id = %s", (user_id,)) == 0:
db.c.update("INSERT INTO r4_listeners (listener_ip, user_id, sid, listener_icecast_id) VALUES ('127.0.0.1', %s, %s, 1)", (user_id, sid))
@handle_api_url("test/login_tuned_out/(\d+)")
class CreateLoginTunedOut(TestUserRequest):
description = "Creates or uses a user account with no tuned in record sets the appropriate cookies so you're that user."
auth_required = False
return_name = "login_tuned_out_result"
def execute(self, user_id, sid):
if db.c.fetch_var("SELECT COUNT(*) FROM r4_listeners WHERE user_id = %s", (user_id,)) > 0:
db.c.update("DELETE FROM r4_listeners WHERE user_id = %s ", (user_id,))
|
YaoQ/faceplusplus-demo | recognition.py | Python | gpl-2.0 | 1,492 | 0.015416 | #!/usr/bin/env python2
# Import system libraries and define helper functions
import time
import sys
import os
import os.path
from pprint import pformat
# First import the API class from the SDK
from facepp import API
from facepp import File
def print_result(hint, result):
def encode(obj):
if type(obj) is unicode:
return obj.encode('utf-8')
if type(obj) is dict:
return {encode(k): encode(v) for (k, v) in obj.iteritems()}
if type(obj) is list:
return [encod | e(i) for i in obj]
return obj
print hint
result = encode(result)
print '\n'.join([' ' + i for i in pformat(result, width = 75).split('\n')])
def init():
fdir = os.path.dirname(__file__)
with open(os.path.join(fdir, 'apikey.cfg')) as f:
exec(f.read())
srv = locals().get('SERVER')
return API(API_KEY, API_SECRET, srv = srv)
api = init()
if len(sys.argv) < 3:
print 'error!'
print './recognize.py <group_name> <image_p | ath>'
sys.exit()
group = sys.argv[1]
IMAGE_DIR = sys.argv[2]
rst = api.recognition.identify( group_name = group,img = File(IMAGE_DIR))
#print 'The person with highest confidence:', \
# rst['face'][0]['candidate'][0]['person_name']
if rst['face']:
print 'The person with highest confidence:', \
rst['face'][0]['candidate'][0]['person_name']
print 'Confidence is :',\
rst['face'][0]['candidate'][0]['confidence']
else:
print 'There is no face that is detected!'
|
FRBs/DM | frb/surveys/dlsurvey.py | Python | bsd-3-clause | 5,919 | 0.008616 | """
DataLab survey class. Gets data from any survey
available through the NOAO datalab-client.
"""
import pdb
import numpy as np
import warnings
from astropy.table import Table
from astropy import units
import warnings
import sys, os
try:
from dl import queryClient as qc, authClient as ac
from dl.helpers.utils import convert
except:
print("Warning: datalab-client is not installed or will not properly connect")
from frb.surveys import surveycoord
class DL_Survey(surveycoord.SurveyCoord):
"""
A survey class for all databases hosted
by NOAO's DataLab. Inherits from SurveyCoord
"""
def __init__(self, coord, radius, **kwargs):
surveycoord.SurveyCoord.__init__(self, coord, radius, **kwargs)
#Define photmetric band names.
self.token = ac.login('anonymous')
self.bands = []
#Instantiate sia service
self.svc = None
#Generate query
self.query = None
self.qc_profile = None
def _parse_cat_band(self,band):
return None, None, None
def _gen_cat_query(self,query_fields=None):
pass
def _select_best_img(self,imgTable,verbose,timeout=120):
pass
def get_catalog(self, query=None, query_fields=None, print_query=False,timeout=120):
"""
Get catalog sources around the given coordinates
within self.radius.
Args:
query (str, optional): SQL query to generate the catalog
query_fields (list, optional): Over-ride list of items to query
print_query (bool): Print the SQL query generated
Returns:
astropy.table.Table: Catalog of sources obtained from the SQL query.
"""
qc.set_profile(self.qc_profile)
# Generate the query
if query is None:
self._gen_cat_query(query_fields)
query = self.query
if print_query:
print(query)
# Do it while silencing print statements
result = qc.query(self.token, sql=query,timeout=timeout)
self.catalog = convert(result,outfmt="table")
self.catalog.meta['radius'] = self.radius
self.catalog.meta['survey'] = self.survey
# Validate
self.validate_catalog()
# Return
return self.catalog.copy()
def get_image(self, imsize, band, timeo | ut=120, verbose=False):
"""
Get images from the catalog if available
for a given fov and band.
Args:
imsize (Quantity): FOV for the desired image
band (str): Band for the image (e.g. 'r')
timeout (int, optional): Time to wait in seconds before timing out
verbose (bool, optional):
Returns:
HDU: Image header data unit
"""
ra = self.coord.ra.value
dec = self.coord.de | c.value
fov = imsize.to(units.deg).value
if band.lower() not in self.bands and band not in self.bands:
raise TypeError("Allowed filters (case-insensitive) for {:s} photometric bands are {}".format(self.survey,self.bands))
table_cols, col_vals, bandstr = self._parse_cat_band(band)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
imgTable = self.svc.search((ra,dec), (fov/np.cos(dec*np.pi/180), fov), verbosity=2).to_table()
if verbose:
print("The full image list contains", len(imgTable), "entries")
#Select band
selection = imgTable['obs_bandpass'].astype(str)==bandstr
#from IPython import embed; embed(header='117')
#Select images in that band
for column, value in zip(table_cols,col_vals):
selection = selection & ((imgTable[column].astype(str)==value))
imgTable = imgTable[selection]
if(len(imgTable)>0):
imagedat = self._select_best_img(imgTable,verbose=True,timeout=timeout)
img_hdu = imagedat[0]
else:
print('No image available')
img_hdu = None
return img_hdu
def get_cutout(self, imsize, band=None):
"""
Get cutout (and header)
Args:
imsize (Quantity): e.g 10*units.arcsec
band (str): e.g. 'r'
Returns:
ndarray, Header: cutout image, cutout image header
"""
self.cutout_size = imsize
if band is None:
if "r" in self.bands:
band = "r"
elif band is None:
band = self.bands[-1]
warnings.warn("Retrieving cutout in {:s} band".format(band))
img_hdu = self.get_image(imsize, band)
if img_hdu is not None:
self.cutout = img_hdu.data
self.cutout_hdr = img_hdu.header
else:
self.cutout = None
self.cutout_hdr = None
return self.cutout, self.cutout_hdr
def _default_query_str(query_fields, database, coord, radius):
"""
Generates default query string for a catalog search.
Args:
query_fields (list of str): A list of query fields to
retrieve from the database
database (str): Name of the database
coord (astropy.coordinates.SkyCoord): Central coordinate of the search
radius (astropy.units.Quantity or Angle): Search radius
Returns:
str: A query to be fed to datalab's SQL client
"""
query_field_str = ""
for field in query_fields:
query_field_str += " {:s},".format(field)
# Remove last comma
query_field_str = query_field_str[:-1]
default_query = """SELECT{:s}
FROM {:s}
WHERE q3c_radial_query(ra,dec,{:f},{:f},{:f})
""".format(query_field_str,database,coord.ra.value,
coord.dec.value,radius.to(units.deg).value)
return default_query
|
pabulumm/neighbors | budget/forms.py | Python | bsd-3-clause | 408 | 0.046569 | from .m | odels import Budget, Expense
from django import forms
class BudgetForm(forms.ModelForm):
class Meta:
model = Budget
fields = (
'title',
'residence_fee',
'neighborhood',
)
class ExpenseForm(forms.ModelForm):
class Meta:
model = Expense
fields = (
'title',
'description',
'cost',
'start_date',
'end_date', |
'types',
'budget',
) |
deluxghost/DelogX | DelogX/defaults/plugins/delog_readmore/__init__.py | Python | lgpl-3.0 | 1,320 | 0 | # -*- coding: utf-8 -*-
import re
from DelogX.utils.i18n import I18n
from DelogX.utils.path import Path
from DelogX.utils.plugin import Plugin
class DelogReadMore(Plugin):
i18n = None
def run(self):
conf = self.blog.default_conf
self.i18n = I18n(
Path.format_url(self.workspace, 'locale'), conf('local.locale'))
self.manager.add_action('dx_post_update', self.parse_readmore)
def parse_readmore(self, post):
if not post: |
return
content_split = re.split(r'<[Hh][Rr](?:\ | s+\/)?>', post.content, 1)
if len(content_split) == 2:
summary, more = content_split
else:
summary = content_split[0]
more = ''
post_url = self.blog.runtime.get('url_prefix.post')
post_url = Path.format_url(post_url, Path.urlencode(post.url))
content = '''{0}
<div class="{1}"><a href="{2}">{3}</a></div>
<div class="post-more">{4}</div>
'''
more_class = ['read-more']
if not more:
more_class.append('no-more-content')
more_class = ' '.join(more_class)
content = content.format(
summary, more_class, post_url, self.i18n.get('Read More'), more)
post.content = content
|
coufon/neon-distributed | neon/backends/kernels/cuda/roipooling.py | Python | apache-2.0 | 9,019 | 0.000887 | # ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# ------------------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see fast-rcnn/LICENSE for details]
# Written by Ross Girshick
# ------------------------------------------------------------------
from pycuda.compiler import SourceModule
from pycuda.tools import context_dependent_memoize
"""
CUDA kernels for ROI pooling layers.
There is a fprop function, a bprop function.
The fprop and bprop CUDA-C code are adapted from Fast R-CNN model.
Each of the kernels uses templating to perform %(type)
conversion so it works for all data types (currently fp32 and fp16 are supported).
"""
def map_string2func(funcname, clss):
"""
Helper function that converts string function names to function calls
"""
if funcname == "fprop_roipooling":
return _get_fprop_roipooling(clss)
if funcname == "bprop_roipooling":
return _get_bprop_roipooling(clss)
raise AttributeError("kernel type '" + funcname + "' not understood")
# This section of the code contains templated CUDA-C code for the kernels.
@context_dependent_memoize
def _get_fprop_roipooling(clss):
code = r"""
#define FLT_MAX 3.402823466E+38F
__global__ void fprop_roipooling(const int nthreads,
const int num_rois, const int img_count,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const float* bottom_data, const float* bottom_rois, float* top_data,
int* argmax_data, const float spatial_scale) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; \
index < (nthreads); index += blockDim.x * gridDim.x){
// (c, ph, pw, n) is an element in the pooled output
int n = index % num_rois;
int pw = (index / num_rois) % pooled_width;
int ph = (index / num_rois / pooled_width) % pooled_height;
int c = index / num_rois / pooled_width / pooled_height;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<float>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
float maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += c * height * width * img_count;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width * img_count + w * img_count + roi_batch_ind;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
// Notice the maxidx (from bottom_index) is relative to the dimension
// (h, w, img_count) of the feature map, so max value is HWN
}
}
"""
module = SourceModule(code)
kernel = module.get_function("fprop_roipooling")
sig = "8I 4P 1f"
kernel.prepare(sig)
return kernel
# This section of the code contains templated CUDA-C code for the kernels.
@context_dependent_memoize
def _get_bprop_roipooling(clss):
code = r"""
__global__ void bprop_roipooling(const int nthreads,
const int num_rois, const int img_count,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const float* top_diff, const float* bottom_rois, float* bottom_diff,
const int* argmax_data, const float spatial_scale) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; \
index < ( | nthreads); index += blockDim.x * gridDim.x){
// (c, h, w, n) coords in bottom data on feature map
int n = index % img_count;
int w = (index / img_count) % width;
int h = (index / img_count / width) % height;
int c = index / img_count/ width / height;
float gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
| const float* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = c * pooled_height * pooled_width * num_rois;
const float* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int phstart = floor(static_cast<float>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<float>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<float>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<float>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/d/defined_and_used_on_same_line.py | Python | mit | 703 | 0.01707 | """Check for definitions and usage happ | ening on the same line."""
#pylint: disable=missing-docstring,multiple-statements,no-absolute-import,parameter-unpacking,wrong-import-position,unnecessary-comprehension
from __future__ import print_function
print([index
| for index in range(10)])
print((index
for index in range(10)))
FILTER_FUNC = lambda x: not x
def func(xxx): return xxx
def func2(xxx): return xxx + func2(1)
import sys; print(sys.exc_info())
for i in range(10): print(i)
j = 4; LAMB = lambda x: x+j
FUNC4 = lambda a, b: a != b
# test https://www.logilab.org/ticket/6954:
with open('f') as f: print(f.read())
with open('f') as f, open(f.read()) as g:
print(g.read())
|
JA-VON/python-helpers-msbm | converter.py | Python | mit | 10,280 | 0.01216 | # coding=utf-8
__author__ = 'javon'
import sys
rdfTemplate = """<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF
xmlns="http://vocab.cdmk-caribbean.net/cdmk#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:owl="http://www.w3.org/2002/07/owl#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dct="http://purl.org/dc/terms/"
xmlns:skos="http://www.w3.org/2004/02/skos/core#"
xmlns:skosxl="http://www.w3.org/2008/05/skos-xl#"
xmlns:ann="http://art.uniroma2.it/ontologies/annotation#">
"""
topLevelConcept = """
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#ConceptScheme"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/DRM_Ontology">
<rdf:type rdf:resource="http://www.w3.org/2008/05/skos-xl#Label"/>
<skosxl:literalForm xml:lang="en">Disaster Recovery Management</skosxl:literalForm>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk">
<skosxl:prefLabel rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/DRM_Ontology"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#Concept"/>
<skos:inScheme rdf:resource="http://vocab.cdmk-caribbean.net/cdmk"/>
<skos:topConceptOf rdf:resource="http://vocab.cdmk-caribbean.net/cdmk"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/xl_en_f27a3993">
<rdf:type rdf:resource="http://www.w3.org/2008/05/skos-xl#Label"/>
<skosxl:literalForm xml:lang="en">CDMK</skosxl:literalForm>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879">
<skosxl:prefLabel rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/xl_en_f27a3993"/>
<hasStatus xmlns="http://art.uniroma2.it/ontologies/vocbench#">Proposed</hasStatus>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/xl_en_f27a3993">
<hasStatus xmlns="http://art.uniroma2.it/ontologies/vocbench#">Proposed</hasStatus>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879">
<dct:created rdf:datatype="xsd:http://www.w3.org/2001/XMLSchema#dateTime">2016-03-30T21:08:54Z</dct:created>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/xl_en_f27a3993">
<dct:created rdf:datatype="xsd:http://www.w3.org/2001/XMLSchema#dateTime">2016-03-30T21:08:54Z</dct:created>
<dct:modified rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2016-03-30T21:08:54Z</dct:modified>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879">
<dct:modified rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2016-03-30T21:09:33Z</dct:modified>
</rdf:Description>
"""
topLevelConceptFuseki = """
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#ConceptScheme"/>
<skos:prefLabel xml:lang="en">Disaster Recovery Management</skos:prefLabel>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#Concept"/>
<skos:inScheme rdf:resource="http://vocab.cdmk-caribbean.net/cdmk"/>
<skos:topConceptOf rdf:resource="http://vocab.cdmk-caribbean.net/cdmk"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879">
<skos:prefLabel xml:lang="en">CDMK</skos:prefLabel>
<hasStatus xmlns="http://art.uniroma2.it/ontologies/vocbench#">Proposed</hasStatus>
<dct:created rdf:datatype="xsd:http://www.w3.org/2001/XMLSchema#dateTime">2016-03-30T21:08:54Z</dct:created>
<dct:modified rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2016-03-30T21:09:33Z</dct:modified>
</rdf:Description>
"""
elementDescription = """
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_893a08ac1%(count)d">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#Concept"/>
<skos:inScheme rdf:resource="http://vocab.cdmk-caribbean.net/cdmk"/>
<skos:broader rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#xl_en_b7d2aa081%(count)d">
<rdf:type rdf:resource="http://www.w3.org/2008/05/skos-xl#Label"/>
<skosxl:literalForm xml:lang="en">%(term)s</skosxl:literalForm>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_893a08ac1%(count)d">
<skosxl:prefLabel rdf:resource="http://vocab.cdmk-caribbean.net/cdmk#xl_en_b7d2aa081%(count)d"/>
<hasStatus xmlns="http://art.uniroma2.it/ontologies/vocbench#">Proposed</hasStatus>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#xl_en_b7d2aa081%(count)d">
<hasStatus xmlns="http://art.uniroma2.it/ontologies/vocbench#">Proposed</hasStatus>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_893a08ac1%(count)d">
<dct:created rdf:datatype="xsd:http://www.w3.org/2001/XMLSchema#dateTime">2016-03-31T04:32:05Z</dct:created>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#xl_en_b7d2aa081%(count)d">
<dct | :created rdf:datatype="xsd:http://www.w3.org/2001/XMLSchema#dateTime">2016-03-31T04:32:05Z</dct:created>
<dct:modified rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2016-03-31T04:32:05Z</dct:modified>
</rdf:Description>
<rdf:Description rdf:a | bout="http://vocab.cdmk-caribbean.net/cdmk#c_893a08ac1%(count)d">
<dct:modified rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2016-03-31T04:32:05Z</dct:modified>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_893a08ac1%(count)d">
<skos:definition rdf:resource="http://vocab.cdmk-caribbean.net/cdmk#def_63e3c550%(count)d"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#def_63e3c550%(count)d">
<rdf:value xml:lang="en">%(description)s</rdf:value>
<dct:created rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2016-03-30T22:22:53Z</dct:created>
</rdf:Description>
"""
elementDescriptionFuskei = """
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_893a08ac1%(count)d">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#Concept"/>
<skos:inScheme rdf:resource="http://vocab.cdmk-caribbean.net/cdmk"/>
<skos:broader rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/c_e34f6879"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_893a08ac1%(count)d">
<skos:prefLabel xml:lang="en">%(term)s</skos:prefLabel>
<dct:created rdf:datatype="xsd:http://www.w3.org/2001/XMLSchema#dateTime">2016-03-31T04:32:05Z</dct:created>
<hasStatus xmlns="http://art.uniroma2.it/ontologies/vocbench#">Proposed</hasStatus>
<dct:modified rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2016-03-31T04:32:05Z</dct:modified>
<skos:definition xml:lang="en">%(description)s</skos:definition>
</rdf:Description>
"""
broader = """
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_%(count)d">
<rdf:type rdf:resource="http://www.w3.org/2004/02/skos/core#Concept"/>
<skos:inScheme rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/CDMK"/>
<skos:broader rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/Disaster_Recovery"/>
</rdf:Description>
<rdf:Description rdf:about="http://vocab.cdmk-caribbean.net/cdmk#c_%(count)d">
<skosxl:prefLabel rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/%(validterm)s"/>
<hasStatus xmlns="http://art.uniroma2.it/ontologies/vocbench#">Proposed</hasStatus>
</rdf:Description>
"""
hastop = """<skos:hasTopConcept rdf:resource="http://vocab.cdmk-caribbean.net/cdmk/%(term)s"/>"""
endTag = """</rdf:RDF>"""
def read_phrase(fo,delimiter):
char = fo.read(1)
if char == ",":
return ""
if char =="\r":
fo.read(1)
char = fo.read(1)
if char == "\"":
char = fo.read(1)
delimiter = "\""
phrase = ""
while char != delimiter:
|
xingyepei/edx-platform | openedx/core/djangoapps/user_api/forms.py | Python | agpl-3.0 | 189 | 0 | # | pylint: disable=unused-import
# TODO: eventually move this implementation into the user_api
"""
Djan | go Administration forms module
"""
from student.forms import PasswordResetFormNoActive
|
century-arcade/xd | crossword/__init__.py | Python | mit | 397 | 0 | # -* | - coding: utf-8 -*-
__title__ = 'crossword'
__version__ = '0.1.2'
__author__ = 'Simeon Visser'
__email__ = 'simeonvisser@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Simeon Visser'
from crossword.core import Crossword
from crossword.exceptions import CrosswordException
from crossword.format_ipuz import from_ipuz, to_ipuz
from crossword.format | _puz import from_puz, to_puz
|
mapnik/mapnik | scons/scons-local-4.1.0/SCons/Variables/BoolVariable.py | Python | lgpl-2.1 | 2,855 | 0.002452 | # MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Option type for true/false Variables.
Usage example::
opts = Variables()
opts.Add(BoolVariable('embedded', 'build for an embedded system', 0))
...
if env['embedded'] == 1:
...
"""
__all__ = ['BoolVariable',]
import SCons.Errors
__true_strings = ('y', 'yes', 'true', 't', '1', 'on' , 'all' )
__false_strings = ('n', 'no', 'false', 'f', '0', 'off', 'none')
def _text2bool(val):
"""
Converts strings to True/False depending on the 'truth' expressed by |
the string. If the string can't be converted, the original value
will be returned.
See '__true_strings' and '__false_strings' for values considered
'true' or 'false respectively.
This is usable as 'converter' for SCons' Variables.
"""
lval = val.lower()
if lval in __true_ | strings: return True
if lval in __false_strings: return False
raise ValueError("Invalid value for boolean option: %s" % val)
def _validator(key, val, env):
"""
Validates the given value to be either '0' or '1'.
This is usable as 'validator' for SCons' Variables.
"""
if not env[key] in (True, False):
raise SCons.Errors.UserError(
'Invalid value for boolean option %s: %s' % (key, env[key]))
def BoolVariable(key, help, default):
"""
The input parameters describe a boolean option, thus they are
returned with the correct converter and validator appended. The
'help' text will by appended by '(yes|no) to show the valid
valued. The result is usable for input to opts.Add().
"""
return (key, '%s (yes|no)' % help, default,
_validator, _text2bool)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
PhillyNJ/SAMD21 | cryptoauthlib/python/cryptoauthlib/atcacert.py | Python | mit | 7,650 | 0.00549 | from ctypes import Structure, c_int, c_uint8, c_uint16, c_char, POINTER, Array
from .atcab import get_cryptoauthlib
from .atcaenum import AtcaEnum
import binascii
class atcacert_cert_type_t(AtcaEnum):
"""Types of certificates"""
CERTTYPE_X509 = 0 # Standard X509 certificate
CERTTYPE_CUSTOM = 1 # Custom format
class atcacert_cert_sn_src_t(AtcaEnum):
"""Sources for the certificate serial number"""
SNSRC_STORED = 0x0 # Cert serial is stored on the device.
SNSRC_STORED_DYNAMIC = 0x7 # Cert serial is stored on the device with the first byte being the DER size (X509 certs only).
SNSRC_DEVICE_SN = 0x8 # Cert serial number is 0x40(MSB) + 9-byte device serial number. Only applies to device certificates.
SNSRC_SIGNER_ID = 0x9 # Cert serial number is 0x40(MSB) + 2-byte signer ID. Only applies to signer certificates.
SNSRC_PUB_KEY_HASH = 0xA # Cert serial number is the SHA256(Subject public key + Encoded dates), with uppermost 2 bits set to 01.
SNSRC_DEVICE_SN_HASH = 0xB # Cert serial number is the SHA256(Device SN + Encoded dates), with uppermost 2 bits set to 01. Only applies to device certificates.
SNSRC_PUB_KEY_HASH_POS = 0xC # Depreciated, don't use. Cert serial number is the SHA256(Subject public key + Encoded dates), with MSBit set to 0 to ensure it's positive.
SNSRC_DEVICE_SN_HASH_POS = 0xD # Depreciated, don't use. Cert serial number is the SHA256(Device SN + Encoded dates), with MSBit set to 0 to ensure it's positive. Only applies to device certificates.
SNSRC_PUB_KEY_HASH_RAW = 0xE # Depreciated, don't use. Cert serial number is the SHA256(Subject public key + Encoded dates).
SNSRC_DEVICE_SN_HASH_RAW = 0xF # Depreciated, don't use. Cert serial number is the SHA256(Device SN + Encoded dates). Only applies to device certificates.
class atcacert_device_zone_t(AtcaEnum):
"""ATECC device zones. The values match the Zone Encodings as specified in the datasheet"""
DEVZONE_CONFIG = 0x00 # Configuration zone.
DEVZONE_OTP = 0x01 # One Time Programmable zone.
DEVZONE_DATA = 0x02 # Data zone (slots).
DEVZONE_NONE = 0x07 # Special value used to indicate there is no device location.
class atcacert_date_format_t(AtcaEnum):
DATEFMT_ISO8601_SEP = 0 # ISO8601 full date YYYY-MM-DDThh:mm:ssZ
DATEFMT_RFC5280_UTC = 1 # RFC 5280 (X.509) 4.1.2.5.1 UTCTime format YYMMDDhhmmssZ
DATEFMT_POSIX_UINT32_BE = 2 # POSIX (aka UNIX) date format. Seconds since Jan 1, 1970. 32 bit unsigned integer, big endian.
DATEFMT_POSIX_UINT32_LE = 3 # POSIX (aka UNIX) date format. Seconds since Jan 1, 1970. 32 bit unsigned integer, little endian.
DATEFMT_RFC5280_GEN = 4 # RFC 5280 (X.509) 4.1.2.5.2 GeneralizedTime format YYYYMMDDhhmmssZ
class atcacert_std_cert_element_t(AtcaEnum):
"""Standard dynamic certificate elements"""
STDCERT_PUBLIC_KEY = 0
STDCERT_SIGNATURE = 1
STDCERT_ISSUE_DATE = 2
STDCERT_EXPIRE_DATE = 3
STDCERT_SIGNER_ID = 4
STDCERT_CERT_SN = 5
STDCERT_AUTH_KEY_ID = 6
STDCERT_SUBJ_KEY_ID = 7
def _atcacert_convert_bytes(kwargs, name, pointer):
k = kwargs.get(name)
if k is not None:
k = k.replace(' ', '').strip()
byte_string = binascii.unhexlify(k)
kwargs[name] = pointer((c_uint8*len(byte_string))(*list(byte_string)))
def _atcacert_convert_enum(kwargs, name, enum):
k = kwargs.get(name)
if k is not None and k is not int:
kwargs[name] = int(getattr(enum, k))
def _atcacert_convert_structure(kwargs, name, structure):
k = kwargs.get(name)
if k is not None and type(k) is dict:
kwargs[name] = structure(**k)
def _atcacert_convert_array(kwargs, name, array):
k = kwargs.get(name)
if k is not None:
a = [array._type_(**e) for e in k]
kwargs[name] = array(*a)
class atcacert_device_loc_t(Structure):
_fields_ = [
('zone', c_int), # Zone in the device.
('slot', c_uint8), # Slot within the data zone. Only applies if zone is DEVZONE_DATA.
('is_genkey', c_uint8), # If true, use GenKey command to get the contents instead of Read.
('offset', c_uint16), # Byte offset in the zone.
('count', c_uint16) # Byte count.
]
def __init__(self, *args, **kwargs):
if kwargs is not None:
_atcacert_convert_enum(kwargs, 'zone', atcacert_device_zone_t)
super(atcacert_device_loc_t, self).__init__(*args, **kwargs)
class atcacert_cert_loc_t(Structure):
_fields_ = [('offset', c_uint16), ('count', c_uint16)]
class atcacert_cert_element_t(Structure):
_fields_ = [
('id', c_char * 16), # ID identifying this element.
('device_loc', atcacert_device_loc_t), # Location in the device for the element.
('cert_loc', atcacert_cert_loc_t) # Location in the certificate template for the element.
]
class atcacert_def_t(Structure):
_fields_ = [
('type', c_int), # Certificate type.
('template_id', c_uint8), # ID for the this certificate definition (4-bit value).
('chain_id', c_uint8), # ID for the certificate chain this definition is a part of (4-bit value).
('private_key_slot', c_uint8), #If this is a device certificate template, this is the device slot for the device private key.
('sn_source', c_int), # Where the certificate serial number comes from (4-bit value).
('cert_sn_dev_loc', atcacert_device_loc_t), # Only applies when sn_source is SNSRC_STORED or SNSRC_STORED_DYNAMIC. Describes where to get the certificate serial number on the device.
('issue_date_format', c_int), # Format of the issue date in the certificate.
('expire_date_format', c_int), # format of the expire date in the certificate.
('tbs_cert_loc', atcacert_cert_loc_t), # Location in the certificate for the TBS (to be signed) portion.
('expire_years', c_uint8), # Number of years the certificate is valid for (5-bit value). 0 means no expiration.
('public_key_dev_loc', atcacert_device_loc_t), # Where on the device the public key can be found.
('comp_cert_dev_loc', atcacert_device_loc_t), #Where on the device the compressed cert can be found.
('std_cert_elements', atcacert_cert_loc_t * 8), # Where in the certificate template the standard cert elements are inserted.
('cert_elements', POINTER(atcacert_cert_element_t)), # Additional certificate elements outside of the standard certificate contents.
('cert_elements_count', c_uint8), # Number of additional certificate elements in cert_elements.
('cert_template', POINTER(c_uint8)), #Pointer to the actual certificate template data.
('cert_template_size', c_uint16) # Size of the certificate template in cert_template in bytes.
]
def __init__(self, *args, **kwargs):
if kwargs is not None:
_atcacert_convert_enum(kwargs, 'type', atcacert_cert_type_t)
_atca | cert_convert_enum(kwargs, 'sn_source', atcacert_cert_sn_src_t)
_atcacert_convert_enum(kwargs, 'issue_date_format', atcacert_date_format_t) |
_atcacert_convert_enum(kwargs, 'expire_date_format', atcacert_date_format_t)
_atcacert_convert_bytes(kwargs, 'cert_template', POINTER(c_uint8))
for f in self._fields_:
if type(f[1]) == type(Structure):
_atcacert_convert_structure(kwargs, f[0], f[1])
if type(f[1]) == type(Array):
_atcacert_convert_array(kwargs, f[0], f[1])
super(atcacert_def_t, self).__init__(*args, **kwargs)
|
will-iam/Variant | casepy/eulerRuO2/nNoh4194304x1/chars.py | Python | mit | 473 | 0.012685 | import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '../../../'))
import script.rio as io
import scrip | t.initial_condition.noh1D as noh1D
# Domain properties
lx = 1.0
ly = 1.0
Nx = 4194304
Ny = 1
# Scheme execution options
T = 0.6
CFL = 0.5
gamma = 5./3.
BClayer = 1
quantityList = ['rho', 'rhou_x', 'rhou_y', 'rhoE']
def buildme(quantityDict, coords_to_uid, coords_to_bc):
noh1D.bui | ld(quantityDict, coords_to_uid, coords_to_bc, Nx, Ny, lx, ly, BClayer)
|
FrodeSolheim/fs-uae-launcher | amitools/scan/ADFSScanner.py | Python | gpl-2.0 | 2,106 | 0.017094 | """Scan an ADF image an visit all files"""
import io
from amitools.fs.blkdev.BlkDevFactory import BlkDevFactory
from amitools.fs.ADFSVolume import ADFSVolume
class ADF | SScanner:
def __init__(self):
self.factory = BlkDevFactory()
def can_handle(self, scan_file):
base_name | = scan_file.get_basename().lower()
for ext in self.factory.valid_extensions:
if base_name.endswith(ext):
return True
return False
def handle(self, scan_file, scanner):
if scan_file.is_seekable():
sf = scan_file
else:
sf = scanner.promote_scan_file(scan_file, seekable=True)
# create blkdev
blkdev = self.factory.open(sf.get_local_path(), fobj=sf.get_fobj())
# create volume
volume = ADFSVolume(blkdev)
volume.open()
# scan volume
node = volume.get_root_dir()
ok = self._scan_node(sf, scanner, node)
# done
volume.close()
blkdev.close()
return ok
def _scan_node(self, scan_file, scanner, node):
if node.is_dir():
# recurse into dir
entries = node.get_entries()
for e in entries:
ok = self._scan_node(scan_file, scanner, e)
if not ok:
return False
return True
elif node.is_file():
# read file in ram fobj
data = node.get_file_data()
node.flush()
size = len(data)
path = node.get_node_path_name().get_unicode()
fobj = io.StringIO(data)
sf = scan_file.create_sub_path(path, fobj, size, True, False)
ok = scanner.scan_obj(sf)
sf.close()
return True
# mini test
if __name__ == '__main__':
import sys
from .FileScanner import FileScanner
ifs = ['*.txt']
def handler(scan_file):
print(scan_file)
return True
def skip_handler(scan_file):
print(("SKIP:", scan_file))
return True
def error_handler(scan_file, error):
print(("FAILED:", scan_file, error))
raise error
scanners = [ADFSScanner()]
fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler,
scanners=scanners, skip_handler=skip_handler)
for a in sys.argv[1:]:
fs.scan(a)
|
LLNL/spack | var/spack/repos/builtin/packages/bird/package.py | Python | lgpl-2.1 | 1,198 | 0.002504 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bird(AutotoolsPackage):
"""The BIRD project aims to develop a dynamic IP routing daemon with
full support of all modern routing protocols, easy to use
configuration interface and powerfu | l route filtering language,
primarily targeted on (but not limited to) Linux and other UNIX-like
systems and distributed under the GNU General Public License."""
homepage = "https://bird.network.cz/"
url = "https://github.com/BIRD/bird/archive/v2.0.2.tar.gz"
version('2.0.2', sha256='bd42d48fbcc2c0046d544 | f1183cd98193ff15b792d332ff45f386b0180b09335')
version('2.0.1', sha256='cd6ea4a39ca97ad16d364bf80f919f0e75eba02dd7fe46be40f55d78d022244a')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('ncurses')
depends_on('readline')
|
rjschwei/WALinuxAgent | tests/common/osutil/test_nsbsd.py | Python | apache-2.0 | 3,180 | 0.003145 | # Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
from azurelinuxagent.common.utils.fileutil import read_file
from azurelinuxagent.common.osutil.nsbsd import NSBSDOSUtil
from tests.tools import AgentTestCase, patch
from os import path
import unittest
class TestNSBSDOSUtil(AgentTestCase):
dhclient_pid_file = "/var/run/dhclient.pid"
def setUp(self):
AgentTestCase.setUp(self)
def tearDown(self):
AgentTestCase.tearDown(self)
def test_get_dhcp_pid_should_return_a_list_of_pids(self):
with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver
original_isfile = path.isfile
def mock_isfile(path):
return True if path == self.dhclient_pid_file else original_isfile(path)
original_read_file = read_file
def mock_read_file(file, *args, **kwargs):
return "123" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs)
with patch("os.path.isfile", mock_isfile):
with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file):
pid_list = NSBSDOSUtil().get_dhcp_pid()
self.assertEquals(pid_list, [123])
def test_get_dhcp_pid_should_return_an_empty_list_when_the_dhcp_client_is_not_running(self):
with patch.object(NSBSDOSUtil, "resolver"): # instantiating NSBSDOSUtil requires a resolver
#
# PID file does not exist
#
original_isfile = path.isfile
def mock_isfile(path):
return False if path == self.dhclient_pid_file else original_isfile(path)
with patch("os.path.isfile", mock_isfile):
| pid_list = NSBSDOSUtil().get_dhcp_pid()
self.assertEquals(pid_list, [])
#
# PID file is empty
#
original_isfile = path.isfile
def mock_isfile(path):
return True if path == self.dhclient_pid_file else original_isfile(path)
original_read_file = read_file
| def mock_read_file(file, *args, **kwargs):
return "" if file == self.dhclient_pid_file else original_read_file(file, *args, **kwargs)
with patch("os.path.isfile", mock_isfile):
with patch("azurelinuxagent.common.osutil.nsbsd.fileutil.read_file", mock_read_file):
pid_list = NSBSDOSUtil().get_dhcp_pid()
self.assertEquals(pid_list, [])
if __name__ == '__main__':
unittest.main()
|
timm/timmnix | pypy3-v5.5.0-linux64/lib-python/3/test/test_urllib2.py | Python | mit | 61,813 | 0.001844 | import unittest
from test import support
import os
import io
import socket
import array
import sys
import urllib.request
# The proxy bypass method imported below has logic specific to the OSX
# proxy config data structure but is testable on all platforms.
from urllib.request import Request, OpenerDirector, _proxy_bypass_macosx_sysconf
import urllib.error
# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler
class TrivialTests(unittest.TestCase):
def test___all__(self):
# Verify which names are exposed
for module in 'request', 'response', 'parse', 'error', 'robotparser':
context = {}
exec('from urllib.%s import *' % module, context)
del context['__builtins__']
if module == 'request' and os.name == 'nt':
u, p = context.pop('url2pathname'), context.pop('pathname2url')
self.assertEqual(u.__module__, 'nturl2path')
self.assertEqual(p.__module__, 'nturl2path')
for k, v in context.items():
self.assertEqual(v.__module__, 'urllib.%s' % module,
"%r is exposed in 'urllib.%s' but defined in %r" %
(k, module, v.__module__))
def test_trivial(self):
# A couple trivial tests
self.assertRaises(ValueError, urllib.request.urlopen, 'bogus url')
# XXX Name hacking to get this to work on Windows.
fname = os.path.abspath(urllib.request.__file__).replace('\\', '/')
if os.name == 'nt':
file_url = "file:///%s" % fname
else:
file_url = "file://%s" % fname
f = urllib.request.urlopen(file_url)
f.read()
f.close()
def test_parse_http_list(self):
tests = [
('a,b,c', ['a', 'b', 'c']),
('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
('a, b, "c", "d", "e,f", g, h',
['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
('a="b\\"c", d="e\\,f", g="h\\\\i"',
['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
for string, list in tests:
self.assertEqual(urllib.request.parse_http_list(string), list)
def test_URLError_reasonstr(self):
err = urllib.error.URLError('reason')
self.assertIn(err.reason, str(err))
class RequestHdrsTests(unittest.TestCase):
def test_request_headers_dict(self):
"""
The Request.headers dictionary is not a documented interface. It
should stay that way, because the complete set of headers are only
accessible through the .get_header(), .has_header(), .header_items()
interface. However, .headers pre-dates those methods, and so real code
will be using the dictionary.
The introduction in 2.4 of those methods was a mistake for the same
reason: code that previously saw all (urllib2 user)-provided headers in
.headers now sees only a subset.
"""
url = "http://example.com"
self.assertEqual(Request(url,
headers={"Spam-eggs": "blah"}
).headers["Spam-eggs"], "blah")
self.assertEqual(Request(url,
headers={"spam-EggS": "blah"}
).headers["Spam-eggs"], "blah")
def test_request_headers_methods(self):
"""
Note the case normalization of header names here, to
.capitalize()-case. This should be preserved for
backwards-compatibility. (In the HTTP case, normalization to
.title()-case is done by urllib2 before sending headers to
http.client).
Note that e.g. r.has_header("spam-EggS") is currently False, and
r.get_header("spam-EggS") returns None, but that could be changed in
future.
Method r.remove_header should remove items both from r.headers and
r.unredirected_hdrs dictionaries
"""
url = "http://example.com"
req = Request(url, headers={"Spam-eggs": "blah"})
self.assertTrue(req.has_header("Spam-eggs"))
self.assertEqual(req.header_items(), [('Spam-eggs', 'blah')])
req.add_header("Foo-Bar", "baz")
self.assertEqual(sorted(req.header_items()),
[('Foo-bar', 'baz'), ('Spam-eggs', 'blah')])
self.assertFalse(req.has_header("Not-there"))
self.assertIsNone(req.get_header("Not-there"))
self.assertEqual(req.get_header("Not-there", "default"), "default")
def test_password_manager(self):
mgr = urllib.request.HTTPPasswordMgr()
add = mgr.add_password
find_user_pass = mgr.find_user_password
add("Some Realm", "http://example.com/", "joe", "password")
add("Some Realm", "http://example.com/ni", "ni", "ni")
add("c", "http://example.com/foo", "foo", "ni")
add("c", "http://example.com/bar", "bar", "nini")
add("b", "http://example.com/", "first", "blah")
add("b", "http://example.com/", "second", "spam")
add("a", "http://example.com", "1", "a")
add("Some Realm", "http://c.example.com:3128", "3", "c")
add("Some Realm", "d.example.com", "4", "d")
add("Some Realm", "e.example.com:3128", "5", "e")
self.assertEqual(find_user_pass("Some Realm", "example.com"),
('joe', 'password'))
#self.assertEqual(find_user_pass("Some Realm", "http://example.com/ni"),
# ('ni', 'ni'))
self.assertEqual(find_user_pass("Some Realm", "http://example.com"),
('joe', 'password'))
self.assertEqual(find_user_pass("Some Realm", "http://example.com/"),
('joe', 'password'))
self.assertEqual(
find_user_pass("Some Realm", "http://example.com/spam"),
('joe', 'password'))
self.assertEqual(
find_user_pass("Some Realm", "http://example.com/spam/spam"),
('joe', 'password'))
self.assertEqual(find_user_pass("c", "http://example.com/foo"),
('foo', 'ni'))
self.assertEqual(find_user_pass("c", "http://example.com/bar"),
('bar', 'nini'))
self.assertEqual(find_user_pass("b", "http://example.com/"),
('second', 'spam'))
# No special relationship between a.example.com and example.com:
self.assertEqual(find_user_pass("a", "http://example.com/"),
('1', 'a'))
self.assertEqual(find_user_pass("a", "http://a.example.com/"),
(None, None))
# Ports:
self.assertEqual(find_user_pass("Some Realm", "c.example.com"),
(None, None))
self.a | ssertEqual(find_user_pass("Some Realm", "c.e | xample.com:3128"),
('3', 'c'))
self.assertEqual(
find_user_pass("Some Realm", "http://c.example.com:3128"),
('3', 'c'))
self.assertEqual(find_user_pass("Some Realm", "d.example.com"),
('4', 'd'))
self.assertEqual(find_user_pass("Some Realm", "e.example.com:3128"),
('5', 'e'))
def test_password_manager_default_port(self):
"""
The point to note here is that we can't guess the default port if
there's no scheme. This applies to both add_password and
find_user_password.
"""
mgr = urllib.request.HTTPPasswordMgr()
add = mgr.add_password
find_user_pass = mgr.find_user_password
add("f", "http://g.example.com:80", "10", "j")
add("g", "http://h.example.com", "11", "k")
add("h", "i.example.com:80", "12", "l")
add("i", "j.example.com", "13", "m")
self.assertEqual(find_user_pass("f", "g.example.com:100"),
(None, None))
self.assertEqual(find_user_pass("f", "g.example.com:80"),
('10', 'j'))
self.assertEqual(find_user_pass("f", "g.example.com"),
(None, Non |
pascalweiss/SearchEngine | model/test_model.py | Python | cc0-1.0 | 1,742 | 0.002308 | import unittest
import os
from model.index import Index
from model.page import Page
class TestPage(unittest.TestCase):
def test_str_outputStringIsAsExpected(self):
test_page = Page()
test_page.title = 'D01'
test_page.content = 'Bla Bla Blub'
expected_output = os.linesep.join([
'---------------------------------------------------------------------------',
'D01',
'---------------------------------------------------------------------------',
'Bla Bla Blub'
])
self.assertEqual(expected_output, str(test_page))
class TestIndex(unittest.TestCase):
def test_createDummyIndex_str_outputStringIsAsExpected(self):
expected_output = os.linesep.join([
'index.txt',
'¯¯¯¯¯¯¯¯¯',
'(DaDaDa, df:1) -> [(\'d01\', 2)]',
'(DumDiDum, df:2) -> [(\'d02\', 1), (\'d03\', 1)]',
''
])
self.assertEqual(expected_output, str(self.get_dummy_index()))
def test_get_posting_list_postingListIsComplete(self):
| index = self.get_dummy_index()
posting_list = index.get_posting_list('daDadA')
self.assertListEqual(posting_list, ['d01'])
posting_list = index.get_posting_list('DaDaDa', 'dumDIDum')
self.assertListEqua(posting_list, ['d{0:02}'.format(n) for n in range(1, 4)])
def get_dummy_index(self):
dummyIndex = Index()
dummyIndex.register_Token('DaDaDa', 'd01')
dummyIndex.register_Toke | n('DaDaDa', 'd01')
dummyIndex.register_Token('DumDiDum', 'd02')
dummyIndex.register_Token('DumDiDum', 'd03')
return dummyIndex
if __name__ == '__main__':
unittest.main()
|
marcin-bakowski-dev/rest-api-mock-server | mock_rest_app/mock_api/callbacks.py | Python | mit | 710 | 0.004225 | import logging
from django.conf import settings
from mock_api.h | ttp_utils import make_request
logger = logging.getLogger(__name__)
def run_api_endpoint_callbacks(api_endpoint):
responses = []
for api_callback in api_endpoint.callbacks.all():
logger.debug("Make callback: %s", api_callback)
response = make_request(method=api_callback.method, url=api_callback.url, params=api_callback.get_params(),
headers=api_callback.get_headers(), timeout=settings.DEFAULT_CALL | BACK_REQUEST_TIMEOUT)
if response:
logger.debug("Callback response status code: %s", response.status_code)
responses.append(response)
return responses
|
dud225/incubator-airflow | airflow/contrib/hooks/gcp_dataproc_hook.py | Python | apache-2.0 | 5,322 | 0.000752 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
import uuid
from apiclient.discovery import build
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
class _DataProcJob:
def __init__(self, dataproc_api, project_id, job):
self.dataproc_api = dataproc_api
self.project_id = project_id
self.job = dataproc_api.projects().regions().jobs().submit(
projectId=self.project_id,
region='global',
body=job).execute()
self.job_id = self.job['reference']['jobId']
logging.info('DataProc job %s is %s', self.job_id,
str(self.job['status']['state']))
def wait_for_done(self):
while True:
self.job = self.dataproc_api.projects().regions().jobs().get(
projectId=self.project_id,
region='global',
jobId=self.job_id).execute()
if 'ERROR' == self.job['status']['state']:
print(str(self.job))
logging.error('DataProc job %s has errors', self.job_id)
logging.error(self.job['status']['details'])
logging.debug(str(self.job))
return False
if 'CANCELLED' == self.job['status']['state']:
print(str(self.job))
logging.warning('DataProc job %s is cancelled', self.job_id)
if 'details' in self.job['status']:
logging.warning(self.job['status']['details'])
logging.debug(str(self.job))
return False
if 'DONE' == self.job['status']['state']:
return True
logging.debug('DataProc job %s is %s', self.job_id,
| str(self.job['status']['state']))
time.sleep(5)
def raise_error(self, message=None):
if 'ERROR' == self.job['status']['state']:
if message is None:
message = "Google DataProc job has error"
raise Exception(message + ": " + str(self.job['status']['details']))
| def get(self):
return self.job
class _DataProcJobBuilder:
def __init__(self, project_id, task_id, dataproc_cluster, job_type, properties):
name = task_id + "_" + str(uuid.uuid1())[:8]
self.job_type = job_type
self.job = {
"job": {
"reference": {
"projectId": project_id,
"jobId": name,
},
"placement": {
"clusterName": dataproc_cluster
},
job_type: {
}
}
}
if properties is not None:
self.job["job"][job_type]["properties"] = properties
def add_variables(self, variables):
if variables is not None:
self.job["job"][self.job_type]["scriptVariables"] = variables
def add_args(self, args):
if args is not None:
self.job["job"][self.job_type]["args"] = args
def add_query(self, query):
self.job["job"][self.job_type]["queryList"] = {'queries': [query]}
def add_jar_file_uris(self, jars):
if jars is not None:
self.job["job"][self.job_type]["jarFileUris"] = jars
def add_archive_uris(self, archives):
if archives is not None:
self.job["job"][self.job_type]["archiveUris"] = archives
def set_main(self, main_jar, main_class):
if main_class is not None and main_jar is not None:
raise Exception("Set either main_jar or main_class")
if main_jar:
self.job["job"][self.job_type]["mainJarFileUri"] = main_jar
else:
self.job["job"][self.job_type]["mainClass"] = main_class
def set_python_main(self, main):
self.job["job"][self.job_type]["mainPythonFileUri"] = main
def build(self):
return self.job
class DataProcHook(GoogleCloudBaseHook):
def __init__(self,
gcp_conn_id='google_cloud_default',
delegate_to=None):
super(DataProcHook, self).__init__(gcp_conn_id, delegate_to)
def get_conn(self):
"""
Returns a Google Cloud DataProc service object.
"""
http_authorized = self._authorize()
return build('dataproc', 'v1', http=http_authorized)
def submit(self, project_id, job):
submitted = _DataProcJob(self.get_conn(), project_id, job)
if not submitted.wait_for_done():
submitted.raise_error("DataProcTask has errors")
def create_job_template(self, task_id, dataproc_cluster, job_type, properties):
return _DataProcJobBuilder(self.project_id, task_id, dataproc_cluster, job_type,
properties)
|
aaxelb/osf.io | admin_tests/base/test_utils.py | Python | apache-2.0 | 3,856 | 0.000519 | from nose.tools import * # flake8: noqa
from django.db.models import Q
from tests.base import AdminTestCase
from osf_tests.factories import SubjectFactory
from osf.models import Subject
from osf.models.preprint_provider import rules_to_subjects
from admin.base.utils import get_subject_rules
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class TestSubjectRules(AdminTestCase):
def setUp(self):
super(TestSubjectRules, self).setUp()
self.parent_one = SubjectFactory() # 0
self.parent_two = SubjectFactory() # 1
self.child_one_1 = SubjectFactory(parent=self.parent_one) # 2
self.child_one_2 = SubjectFactory(parent=self.parent_one) # 3
self.grandchild_one_1 = SubjectFactory(parent=self.child_one_1) # 4
self.grandchild_one_2 = SubjectFactory(parent=self.child_one_1) # 5
self.child_two_1 = SubjectFactory(parent=self.parent_two) # 6
self.child_two_2 = SubjectFactory(parent=self.parent_two) # 7
def test_error_when_child_called_without_parent(self):
subjects_selected = [self.child_one_1]
with self.assertRaises(AttributeError):
get_subject_rules(subjects_selected)
def test_just_toplevel_subject(self):
subjects_selected = [self.parent_one]
rules_returned = get_subject_rules(subjects_selected)
rules_ideal = [[[self.parent_one._id], False]]
self.assertItemsEqual(rules_returned, rules_ideal)
def test_two_toplevel_subjects(self):
subjects_selected = [
self.parent_one,
self.parent_two
]
rules_returned = get_subject_rules(subjects_selected)
rules_ideal = [
[[self.parent_one._id], False],
[[self.parent_two._id], False]
]
self.assertItemsEqual(rules_returned, rules_ideal)
def test_one_child(self):
subjects_selected = [
self.parent_one,
self.child_one_1
]
rules_returned = get_subject_rules(subjects_selected)
rules_ideal = [[[self.parent_one._id, self.child_one_1._id], False]]
self.assertItemsEqual(rules_returned, rules_ideal)
def test_one_child_all_grandchildren(self):
subjects_selected = [
self.parent_one,
self.child_one_1,
self.grandchild_one_1,
self.grandchild_one_2,
]
rules_returned = get_subject_rules(subjects_selected)
rules_ideal = [[[self.parent_one._id, self.child_one_1._id], True]]
self.assertItemsEqual(rules_returned, rules_ideal)
def test_all_children_all_grandchildren(self):
subjects_selected = [
self.parent_one,
self.child_one_1,
self.grandchild_one_1,
self.grandchild_one_2,
self.child_one_2
]
rules_returned = get_subject_rules(subjects_selected)
rules_ideal = [[[self.parent_one._id], True]]
self.assertItemsEqual(rules_returned, rules_ideal)
def test_one_child_with_one_grandchild(self):
subjects_selected = [
self.parent_one,
self.child_one_1,
self.grandchild_one_1
]
rules_returned = get_subject_rules(subjects_selected)
rules_ideal = [
[[self.parent_one._id, self.child_one_1._id, self.grandchild_one_1._id], False]
]
self.assertItemsEqual(rules_returned, rules_ideal)
def test_rules_to_subjects(self):
| rules = [
[ | [self.parent_one._id, self.child_one_1._id], False]
]
subject_queryset_ideal = Subject.objects.filter(Q(id=self.parent_one.id) | Q(id=self.child_one_1.id))
returned_subjects = rules_to_subjects(rules)
self.assertItemsEqual(subject_queryset_ideal, returned_subjects)
|
krishnaku/ec2x | ec2x/console.py | Python | mit | 1,600 | 0.000625 | from . import name_binding, tasks
import argh
from argh.decorators import arg
from fabric.tasks import execute
from functools import reduce
def rehash():
"""Update the binding of instance names to instance_id and public_dns_names
and also update the name_binding cache.
"""
name_binding.export_name_bindings_to_file()
@arg('instance_name', help='name of the ec2 instance')
def ssh(instance_name):
"""ssh into a remote ec2 instance by name.
Example: ec2x ssh <instance_name>
"""
execute(tasks.ssh, instance_name)
@arg('instance_name', help='name of the ec2 instance')
@arg('command', help='command to be executed')
def remote(instance_name, *command):
"""Execute a remote ssh command on the named instance
Examples:
ec2x remote | <instance_name> ps ax
:show the processes running on the remote instance <instance>
ec2x remote <instance_name> \"ps ax | grep ssh\"
:run the piped command sequence on the remote instance
"""
execute(tasks.remote_run,
_concat(command),
instance_name)
@arg('command', help='commmand to be executed')
def local(*command):
"""Execute a local command with the ec2 na | me bindings in the environment
Example: ec2x local echo "The public dns name of the instance named euler is " $euler
"""
tasks.local(_concat(command))
def _concat(command):
return reduce(lambda out, s: out + s + ' ', command, '')
def __main__():
argh.dispatch_commands(
[
ssh,
remote,
local,
rehash
])
|
wattlebird/ranking | rankit/Ranker/__init__.py | Python | mit | 344 | 0.008721 | from .UnsupervisedRanker import MasseyRanker, ColleyRanker, Keene | rRanker, MarkovRanker, ODRanker, DifferenceRanker
from .TimeSeriesRanker import EloRanker, TrueSkillRanker, GlickoRanker
__all__ = ["MasseyRanker", " | ColleyRanker", "KeenerRanker", "MarkovRanker", "ODRanker", "DifferenceRanker", "EloRanker", "TrueSkillRanker", "GlickoRanker"] |
Chris35Wills/Chris35Wills.github.io | courses/examples/Beginners_python/conditions.py | Python | mit | 826 | 0.01937 | # Past examples are programmatically insecure
# You require arguments to be passed in but what if the wrong arguments are provided?
# Look at the timestable solution which changes numbers to text - what happens | if you provide the number 30?
#
# One way of controlling these things uses conditions
# These enable specific operations to be carried out "if" something is the case or "else" something else is the case
a = 5
# first condition trial
if a >= 5:
print("Value is greater than 5")
else:
print("Value is less than 5")
# second condition trial
if a >= 5:
print("Value is greater than 5")
elif a < 5:
print("Value is less than 5")
else:
print("Value | is 5")
# if and (2 conditions)
a=3
b=5
if (a==3) and (b==5):
print("a and b are as expected - great :)")
else:
print("a and b not as expected - not great :(") |
adamreeve/npTDMS | nptdms/utils.py | Python | lgpl-3.0 | 1,733 | 0.001154 | from functools import wraps
import logging
import time
try:
from collections import OrderedDict
except ImportError:
try:
# ordereddict available on py | pi for P | ython < 2.7
from ordereddict import OrderedDict
except ImportError:
# Otherwise fall back on normal dict
OrderedDict = dict
def cached_property(func):
""" Wraps a method on a class to make it a property and caches the result the first time it is evaluated
"""
attr_name = '_cached_prop_' + func.__name__
@property
@wraps(func)
def get(self):
try:
return getattr(self, attr_name)
except AttributeError:
value = func(self)
setattr(self, attr_name, value)
return value
return get
class Timer(object):
""" Context manager for logging the time taken for an operation
"""
def __init__(self, log, description):
self._enabled = log.isEnabledFor(logging.INFO)
self._log = log
self._description = description
self._start_time = None
def __enter__(self):
if not self._enabled:
return self
try:
self._start_time = time.perf_counter()
except AttributeError:
# Python < 3.3
self._start_time = time.clock()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._enabled:
return
try:
end_time = time.perf_counter()
except AttributeError:
# Python < 3.3
end_time = time.clock()
elapsed_time = (end_time - self._start_time) * 1.0e3
self._log.info("{0}: Took {1} ms".format(self._description, elapsed_time))
|
raildo/keystone | keystone/tests/test_utils.py | Python | apache-2.0 | 8,700 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import logging
import os
import time
import uuid
from six.moves import cStringIO
from testtools import matchers
from keystone.common import utils
from keystone import tests
TZ = None
def timezone(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
tz_original = os.environ.get('TZ')
try:
if TZ:
os.environ['TZ'] = TZ
time.tzset()
return func(*args, **kwargs)
finally:
if TZ:
if tz_original:
os.environ['TZ'] = tz_original
else:
if 'TZ' in os.environ:
del os.environ['TZ']
time.tzset()
return wrapper
class UtilsTestCase(tests.TestCase):
def test_hash(self):
password = 'right'
wrong = 'wrongwrong' # Two wrongs don't make a right
hashed = utils.hash_password(password)
self.assertTrue(utils.check_password(password, hashed))
self.assertFalse(utils.check_password(wrong, hashed))
def test_hash_long_password(self):
bigboy = '0' * 9999999
hashed = utils.hash_password(bigboy)
self.assertTrue(utils.check_password(bigboy, hashed))
def test_hash_edge_cases(self):
hashed = utils.hash_password('secret')
self.assertFalse(utils.check_password('', hashed))
self.assertFalse(utils.check_password(None, hashed))
def test_hash_unicode(self):
password = u'Comment \xe7a va'
wrong = 'Comment ?a va'
hashed = utils.hash_password(password)
self.assertTrue(utils.check_password(password, hashed))
self.assertFalse(utils.check_password(wrong, hashed))
def test_auth_str_equal(self):
self.assertTrue(utils.auth_str_equal('abc123', 'abc123'))
self.assertFalse(utils.auth_str_equal('a', 'aaaaa'))
self.assertFalse(utils.auth_str_equal('aaaaa', 'a'))
self.assertFalse(utils.auth_str_equal('ABC123', 'abc123'))
def test_unixtime(self):
global TZ
@timezone
def _test_unixtime():
epoch = utils.unixtime(dt)
self.assertEqual(epoch, epoch_ans, "TZ=%s" % TZ)
dt = datetime.datetime(1970, 1, 2, 3, 4, 56, 0)
epoch_ans = 56 + 4 * 60 + 3 * 3600 + 86400
for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']:
TZ = 'UTC' + d
_test_unixtime()
class LimitingReaderTests(tests.TestCase):
def test_read_default_value(self):
class FakeData(object):
def read(self, *args, **kwargs):
self.read_args = args
self.read_kwargs = kwargs
return 'helloworld'
data = FakeData()
utils.LimitingReader(data, 100)
self.assertEqual(data.read(), 'helloworld')
self.assertEqual(len(data.read_args), 0)
self.assertEqual(len(data.read_kwargs), 0)
self.assertEqual(data.read(10), 'helloworld')
self.assertEqual(len(data.read_args), 1)
self.assertEqual(len(data.read_kwargs), 0)
self.assertEqual(data.read_args[0], 10)
class TestDeprecated(tests.TestCase):
def setUp(self):
super(TestDeprecated, self).setUp()
self.deprecated_message = cStringIO()
self.handler = logging.StreamHandler(self.deprecated_message)
self.logger = logging.getLogger('keystone.common.utils')
self.logger.addHandler(self.handler)
def tearDown(self):
super(TestDeprecated, self).tearDown()
self.logger.removeHandler(self.handler)
def test_deprecating_a_function_returns_correct_value(self):
@utils.deprecated(as_of=utils.deprecated.ICEHOUSE)
def do_outdated_stuff(data):
return data
expected_rv = uuid.uuid4().hex
retval = do_outdated_stuff(expected_rv)
self.assertThat(retval, matchers | .Equals(expected_rv))
def test_deprecating_a_method_returns_correct_value(self):
class C(object):
@utils.deprecated(as_of=utils.deprecated.ICEHOUSE)
def outdated_method(self, *args):
return ar | gs
retval = C().outdated_method(1, 'of anything')
self.assertThat(retval, matchers.Equals((1, 'of anything')))
def test_deprecated_with_unknown_future_release(self):
@utils.deprecated(as_of=utils.deprecated.ICEHOUSE,
in_favor_of='different_stuff()')
def do_outdated_stuff():
return
do_outdated_stuff()
expected = ('do_outdated_stuff() is deprecated as of Icehouse '
'in favor of different_stuff() and may be removed in K.')
self.assertThat(self.deprecated_message.getvalue(),
matchers.Contains(expected))
def test_deprecated_with_known_future_release(self):
@utils.deprecated(as_of=utils.deprecated.GRIZZLY,
in_favor_of='different_stuff()')
def do_outdated_stuff():
return
do_outdated_stuff()
expected = ('do_outdated_stuff() is deprecated as of Grizzly '
'in favor of different_stuff() and may be removed in '
'Icehouse.')
self.assertThat(self.deprecated_message.getvalue(),
matchers.Contains(expected))
def test_deprecated_without_replacement(self):
@utils.deprecated(as_of=utils.deprecated.GRIZZLY)
def do_outdated_stuff():
return
do_outdated_stuff()
expected = ('do_outdated_stuff() is deprecated as of Grizzly '
'and may be removed in Icehouse. It will not be '
'superseded.')
self.assertThat(self.deprecated_message.getvalue(),
matchers.Contains(expected))
def test_deprecated_with_custom_what(self):
@utils.deprecated(as_of=utils.deprecated.GRIZZLY,
what='v2.0 API',
in_favor_of='v3 API')
def do_outdated_stuff():
return
do_outdated_stuff()
expected = ('v2.0 API is deprecated as of Grizzly in favor of '
'v3 API and may be removed in Icehouse.')
self.assertThat(self.deprecated_message.getvalue(),
matchers.Contains(expected))
def test_deprecated_with_removed_next_release(self):
@utils.deprecated(as_of=utils.deprecated.GRIZZLY,
remove_in=1)
def do_outdated_stuff():
return
do_outdated_stuff()
expected = ('do_outdated_stuff() is deprecated as of Grizzly '
'and may be removed in Havana. It will not be '
'superseded.')
self.assertThat(self.deprecated_message.getvalue(),
matchers.Contains(expect |
lyndonnixon/LSI | enricher.py | Python | apache-2.0 | 1,636 | 0.003667 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Periodically enrich the metadata of media resources by invoking Named Entity Recognition Services.
.. moduleauthor:: Dong Liu <liu.dong66@gmail.com>
"""
import config
import esclient
import ner
import model
import json
import logging
import lsilog
from time import sleep
def enrich(media_count):
    """Enrich the metadata of a number of media resources.

    Polls ElasticSearch for media in status 'crawled', runs NER enrichment
    on them, and writes each updated document back with status 'enriched'
    until at least ``media_count`` resources have been processed.

    Args:
        media_count (int): The number of the media resources to be enriched.
    """
    elastic_search_client = esclient.ElasticSearchClient(config.elastic_search_endpoint)
    enriched_count = 0
    enriched_video = 0
    enriched_image = 0
    # NOTE(review): if find_media_by_status() keeps returning an empty list
    # this loop spins without any delay between polls -- presumably crawled
    # media is always available; confirm, or add a sleep between iterations.
    while enriched_count < media_count:
        media_list = elastic_search_client.find_media_by_status('crawled')
        for media in ner.enrich(media_list, keep_ner_log=True):
            media.status = 'enriched'
            sleep(1)  # 1s pause per item (original throttling behaviour kept)
            if isinstance(media, model.VideoTrack):
                enriched_video += 1
                elastic_search_client.update_video(
                    media.id, json.dumps(media, cls=model.VideoEncoder))
            elif isinstance(media, model.Image):
                enriched_image += 1
                elastic_search_client.update_image(
                    media.id, json.dumps(media, cls=model.ImageEncoder))
            enriched_count += 1
    logger = logging.getLogger("enricher")
    # Lazy %-style logging args; also fixes the 'meida' typo in the message.
    logger.info('Enriched %d media resources: %d videos and %d images',
                enriched_count, enriched_video, enriched_image)
# Script entry point: initialise logging, run one enrichment batch of 1500
# media resources, then flush all log handlers.
if __name__ == '__main__':
    lsilog.init_logging_sys()
    enrich(1500)
    logging.shutdown()
|
rebortyang/flask_blog | app/auth/__init__.py | Python | mit | 111 | 0.036036 | _ | _author__ = 'yangjiebin'
from flask import Blueprint
auth = Blueprint('auth',__name__)
from . import views | |
alexissmirnov/donomo | donomo_archive/lib/reportlab/test/test_pdfbase_encodings.py | Python | bsd-3-clause | 10,317 | 0.00979 | from reportlab.test import unittest
from reportlab.test.utils import makeSuiteForClasses, outputfile, printLocation, NearTestCase
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfutils
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import ParagraphStyle
from reportlab.graphics.shapes import Drawing, String, Ellipse
import re
import codecs
textPat = re.compile(r'\([^(]*\)')
#test sentences
testCp1252 = 'copyright %s trademark %s registered %s ReportLab! Ol%s!' % (chr(169), chr(153),chr(174), chr(0xe9))
testUni = unicode(testCp1252, 'cp1252')
testUTF8 = testUni.encode('utf-8')
# expected result is octal-escaped text in the PDF
expectedCp1252 = pdfutils._escape(testCp1252)
def extractText(pdfOps):
    """Utility to rip out the PDF text within a block of PDF operators.

    PDF will show a string draw as something like "(Hello World) Tj"
    i.e. text is in curved brackets. Crude and dirty, probably fails
    on escaped brackets.
    """
    # Same pattern as the module-level textPat; strip the enclosing parens.
    return [piece[1:-1] for piece in re.findall(r'\([^(]*\)', pdfOps)]
def subsetToUnicode(ttf, subsetCodeStr):
    """Return unicode string represented by given subsetCode string
    as found when TrueType font rendered to PDF, ttf must be the font
    object that was used."""
    # This relies on TTFont internals and uses the first document
    # and subset it finds
    subset = ttf.state.values()[0].subsets[0]
    chrs = []
    # subsetCodeStr looks like a run of backslash escapes ("\101\102...");
    # splitting on the backslash yields one code chunk per glyph.
    for codeStr in subsetCodeStr.split('\\'):
        if codeStr:
            # Drops the first character of each chunk and parses the rest as
            # octal -- assumes that matches how TTFont wrote the subset codes
            # out; TODO confirm against TTFont's subset encoding.
            chrs.append(unichr(subset[int(codeStr[1:], 8)]))
    return u''.join(chrs)
class TextEncodingTestCase(NearTestCase):
    """Tests of expected Unicode and encoding behaviour
    """
    def setUp(self):
        # Register a TrueType font alongside the built-in Type 1 fonts so
        # both single-byte and UTF-8/Unicode code paths are exercised.
        self.luxi = TTFont("Luxi", "luxiserif.ttf")
        pdfmetrics.registerFont(self.luxi)
        self.styNormal = ParagraphStyle(name='Helvetica', fontName='Helvetica-Oblique')
        self.styTrueType = ParagraphStyle(name='TrueType', fontName='luxi')

    def testStringWidth(self):
        msg = 'Hello World'
        self.assertNear(pdfmetrics.stringWidth(msg, 'Courier', 10), 66.0)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Helvetica', 10), 51.67)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Times-Roman', 10), 50.27)
        self.assertNear(pdfmetrics.stringWidth(msg, 'Luxi', 10), 50.263671875)

        uniMsg1 = u"Hello World"
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Courier', 10), 66.0)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Helvetica', 10), 51.67)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Times-Roman', 10), 50.27)
        self.assertNear(pdfmetrics.stringWidth(uniMsg1, 'Luxi', 10), 50.263671875)

        # Courier are all 600 ems wide. So if one 'measures as utf8' one will
        # get a wrong width as extra characters are seen
        self.assertEquals(len(testCp1252), 52)
        self.assertNear(pdfmetrics.stringWidth(testCp1252, 'Courier', 10, 'cp1252'), 312.0)
        # the test string has 5 more bytes and so "measures too long" if passed to
        # a single-byte font which treats it as a single-byte string.
        self.assertEquals(len(testUTF8), 57)
        self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Courier', 10), 312.0)
        self.assertEquals(len(testUni), 52)
        self.assertNear(pdfmetrics.stringWidth(testUni, 'Courier', 10), 312.0)

        # now try a TrueType font. Should be able to accept Unicode or UTF8
        self.assertNear(pdfmetrics.stringWidth(testUTF8, 'Luxi', 10), 224.638671875)
        self.assertNear(pdfmetrics.stringWidth(testUni, 'Luxi', 10), 224.638671875)

    def testUtf8Canvas(self):
        """Verify canvas declared as utf8 autoconverts.

        This assumes utf8 input. It converts to the encoding of the
        underlying font, so both text lines APPEAR the same."""
        c = Canvas(outputfile('test_pdfbase_encodings_utf8.pdf'))

        c.drawString(100, 700, testUTF8)

        # Set a font with UTF8 encoding
        c.setFont('Luxi', 12)
        # This should pass the UTF8 through unchanged
        c.drawString(100, 600, testUTF8)
        # and this should convert from Unicode to UTF8
        c.drawString(100, 500, testUni)

        # now add a paragraph in Latin-1 in the latin-1 style
        p = Paragraph(testUTF8, style=self.styNormal, encoding="utf-8")
        w, h = p.wrap(150, 100)
        p.drawOn(c, 100, 400)  # 3
        c.rect(100, 300, w, h)

        # now add a paragraph in UTF-8 in the UTF-8 style
        p2 = Paragraph(testUTF8, style=self.styTrueType, encoding="utf-8")
        w, h = p2.wrap(150, 100)
        p2.drawOn(c, 300, 400)  # 4
        c.rect(100, 300, w, h)

        # now add a paragraph in Unicode in the latin-1 style
        p3 = Paragraph(testUni, style=self.styNormal)
        w, h = p3.wrap(150, 100)
        p3.drawOn(c, 100, 300)
        c.rect(100, 300, w, h)

        # now add a paragraph in Unicode in the UTF-8 style
        p4 = Paragraph(testUni, style=self.styTrueType)
        p4.wrap(150, 100)
        p4.drawOn(c, 300, 300)
        c.rect(300, 300, w, h)

        # now a graphic
        d1 = Drawing(400, 50)
        d1.add(Ellipse(200, 25, 200, 12.5, fillColor=None))
        d1.add(String(200, 25, testUTF8, textAnchor='middle', encoding='utf-8'))
        d1.drawOn(c, 100, 150)

        # now a graphic in utf8
        d2 = Drawing(400, 50)
        d2.add(Ellipse(200, 25, 200, 12.5, fillColor=None))
        d2.add(String(200, 25, testUTF8, fontName='Luxi', textAnchor='middle', encoding='utf-8'))
        d2.drawOn(c, 100, 100)

        # now a graphic in Unicode with T1 font
        d3 = Drawing(400, 50)
        d3.add(Ellipse(200, 25, 200, 12.5, fillColor=None))
        d3.add(String(200, 25, testUni, textAnchor='middle'))
        d3.drawOn(c, 100, 50)

        # now a graphic in Unicode with TT font
        d4 = Drawing(400, 50)
        d4.add(Ellipse(200, 25, 200, 12.5, fillColor=None))
        d4.add(String(200, 25, testUni, fontName='Luxi', textAnchor='middle'))
        d4.drawOn(c, 100, 0)

        extracted = extractText(c.getCurrentPageContent())
        self.assertEquals(extracted[0], expectedCp1252)
        self.assertEquals(extracted[1], extracted[2])
        # self.assertEquals(subsetToUnicode(self.luxi, extracted[1]), testUni)
        c.save()
class FontEncodingTestCase(unittest.TestCase):
"""Make documents with custom encodings of Type 1 built-in fonts.
Nothing really to do with character encodings; this is about hacking the font itself"""
def test0(self):
"Make custom encodings of standard fonts"
# make a custom encoded font.
c = Canvas(outputfile('test_pdfbase_encodings.pdf'))
c.setPageCompression(0)
c.setFont('Helvetica', 12)
c.drawString(100, 700, 'The text below should be in a custom encoding in which all vowels become "z"')
# invent a new language where vowels are replaced with letter 'z'
zenc = pdfmetrics.Encoding('EncodingWithoutVowels', 'WinAnsiEncoding')
for ch in 'aeiou':
zenc[ord(ch)] = 'z'
for ch in 'AEIOU':
zenc[ord(ch)] = 'Z'
pdfmetrics.registerEncoding(zenc)
# now we can make a font based on this encoding
# AR hack/workaround: the name of the encoding must be a Python codec!
f = pdfmetrics.Font('FontWithoutVowels', 'Helvetica-Oblique', 'EncodingWithoutVowels')
pdfmetrics.registerFont(f)
c.setFont('FontWithoutVowels', 12)
c.drawString(125, 675, "The magic word is squamish ossifrage")
# now demonstrate adding a Euro to MacRoman, which lacks one
c.setFont('Helvetica', 12)
c.drawString(100, 650, "MacRoman encoding lacks a Euro. We'll make a Mac font with the Euro at #219:")
# WinAnsi Helvetica
pdfmetrics.registerFont(pdfmetrics.Font('Helvetica-WinAnsi', 'Helvetica-Oblique', 'WinAnsiEncoding'))
c.setFont('Helvetica-WinAnsi', 12)
c.drawString(125, 625, 'WinAnsi with Euro: character 128 = "\200"')
pdfmetrics.registerFont(pdfmetrics.Font('Mac |
mathiasose/celery_dill_serializer | celery_dill_serializer/__init__.py | Python | mit | 525 | 0 | from dill import dill
from kombu.serialization import pickle_loads, | pickle_protocol, registry
from kombu.utils.encoding import str_to_bytes
def register_dill():
    """Register a 'dill' serializer with kombu so Celery can use it.

    dill can pickle more object kinds than the stdlib pickle module
    (closures, lambdas, ...), which is why it is offered as a drop-in
    encoder/decoder pair under the name 'dill'.
    """
    def encode(obj, dumper=dill.dumps):
        # Serialize with the same pickle protocol kombu uses by default.
        return dumper(obj, protocol=pickle_protocol)

    def decode(s):
        # kombu hands us text/bytes; normalise to bytes before unpickling.
        return pickle_loads(str_to_bytes(s), load=dill.load)

    registry.register(
        name='dill',
        encoder=encode,
        decoder=decode,
        content_type='application/x-python-serialize',
        content_encoding='binary'
    )
|
jjuanda/cuco | cuco/SupervisorJob.py | Python | gpl-2.0 | 2,735 | 0.004022 | from jinja2 import Template
class SupervisorJob(object):
    """A single supervisord-managed job built from a configuration dict."""

    def __init__(self, config):
        """ Specify the configuration options for a job.

        'config' must be a dictionary containing the following keys:
            - env_vars: dict of key/value pairs to be set as environment
                  variables, e.g. {"ES_HOME": "/home/es"}
            - name: job name, used by Supervisor to uniquely identify the job
                  e.g. "elasticsearch"
            - base_dir: base directory for the supervisor job, e.g. "/home/es"
            - cmd: full command (with args) that supervisor will run
                  e.g. "elasticsearch -p /home/es/es.pid"
            - stdout_file: full path of the file Supervisor dumps stdout to
            - stderr_file: full path of the file Supervisor dumps stderr to
        """
        super(SupervisorJob, self).__init__()
        self.config = config
        # Normalise: guarantee an 'env_vars' dict is always present.
        self['env_vars'] = config.get('env_vars', {})

    def prepare(self):
        """Hook for subclasses to set up the job before rendering."""
        raise NotImplementedError("base class")

    def __getitem__(self, k):
        """Dictionary-style read access to the underlying config."""
        return self.config[k]

    def __setitem__(self, k, v):
        """Dictionary-style write access to the underlying config."""
        self.config[k] = v

    def __contains__(self, k):
        """True if the config defines the key ``k``."""
        return k in self.config

    def add_env(self, k, v):
        """Add (or overwrite) one environment variable for this job."""
        self['env_vars'][k] = v

    def as_supervisor_program(self):
        """Render this job as a supervisord ``[program:x]`` config block."""
        # The template lines are intentionally unindented: the rendered text
        # is written verbatim into a supervisord configuration file.
        config = """[program:{{program_name}}]
command = {{cmd}}
directory = {{base_dir}}
autostart = true
autorestart = true
stopsignal = KILL
killasgroup = true
stopasgroup = true
environment = {{env}}
stdout_logfile = {{stdout}}
stderr_logfile = {{stderr}}
"""
        template = Template(config)
        return template.render({
            "program_name": self.config['name'],
            "base_dir": self.config['base_dir'],
            "env": self.get_env_str(),
            "cmd": self.config['cmd'],
            "stdout": self.config['stdout_file'],
            "stderr": self.config['stderr_file'],
        })

    def as_exports(self):
        """Render the environment as shell ``export K=V`` lines."""
        res = ""
        for k, v in self['env_vars'].items():
            res += "export %s=%s\n" % (k, v)
        return res

    def get_env_str(self):
        """Render the environment in supervisord's ``K="V", K2="V2"`` syntax."""
        return ", ".join('%s="%s"' % (k, v) for k, v in self['env_vars'].items())
|
psydrake/bicreditsnew | qa/rpc-tests/txn_doublespend.py | Python | mit | 4,976 | 0.002814 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework import BitcreditTestFramework
from bitcreditrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from util import *
import os
import shutil
class TxnMallTest(BitcreditTestFramework):
    """Regression test: proper accounting when a transaction is double-spent."""

    def add_options(self, parser):
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!

        # Assign coins to foo and bar accounts:
        self.nodes[0].move("", "foo", 1220)
        self.nodes[0].move("", "bar", 30)
        assert_equal(self.nodes[0].getbalance(""), 0)

        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")

        # First: use raw transaction API to send 1210 BTC to node1_address,
        # but don't broadcast:
        (total_in, inputs) = gather_inputs(self.nodes[0], 1210)
        change_address = self.nodes[0].getnewaddress("foo")
        outputs = {}
        outputs[change_address] = 40
        outputs[node1_address] = 1210
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)

        # Create two transaction from node[0] to node[1]; the
        # second must spend change from the first because the first
        # spends all mature inputs:
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 1210, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)

        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].setgenerate(True, 1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus 1210, minus 20, and minus transaction fees:
        expected = starting_balance
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo"), 1220 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar"), 30 + tx2["amount"] + tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Now give doublespend to miner:
        mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].setgenerate(True, 1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].setgenerate(True, 1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -1)
        assert_equal(tx2["confirmations"], -1)

        # Node0's total balance should be starting balance, plus 100BTC for
        # two more matured blocks, minus 1210 for the double-spend:
        expected = starting_balance + 100 - 1210
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)

        # foo account should be debited, but bar account should not:
        assert_equal(self.nodes[0].getbalance("foo"), 1220 - 1210)
        assert_equal(self.nodes[0].getbalance("bar"), 30)

        # Node1's "from" account balance should be just the mutated send:
        assert_equal(self.nodes[1].getbalance("from0"), 1210)
# Allow running this regression test directly as a script.
if __name__ == '__main__':
    TxnMallTest().main()
|
amigos-do-gesiel/iespv-administrativo | users/forms.py | Python | mit | 597 | 0.031826 | from .models import Donor
from django.forms import ModelForm, TextInput
class DonorForm(ModelForm):
    """ModelForm for Donor with Bootstrap 'form-control' styling on every input."""

    class Meta:
        model = Donor
        fields = [
            "email",
            "donation_date",
            "phone_number",
            "address",
            "observations"
        ]
        # Each field renders as a TextInput carrying the Bootstrap class.
        widgets = {
            'donation_date': TextInput(attrs={'class': 'form-control'}),
            'phone_number': TextInput(attrs={'class': 'form-control'}),
            'address': TextInput(attrs={'class': 'form-control'}),
            'email': TextInput(attrs={'class': 'form-control'}),
            'observations': TextInput(attrs={'class': 'form-control'}),
        }
foobarbazblarg/stayclean | stayclean-2018-march/serve-signups-with-flask.py | Python | mit | 8,581 | 0.003613 | #!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '7zrrj1', '7zxkpq', '8055hn', '80ddrf', '80nbm1', '80waq3' ]
flaskport = 8993
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
    """Read user/password from the credentials file and log in to reddit."""
    parser = ConfigParser()
    parser.read("../reddit-password-credentials.cfg")
    username = parser.get("Reddit", "user")
    secret = parser.get("Reddit", "password")
    # TODO: password auth is going away, and we will soon need to do oauth.
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    session.login(username, secret, disable_warning=True)
    return session
def loginOAuthAndReturnRedditSession():
    """Create a praw Reddit session; recent praw handles OAuth internally."""
    # Newer praw no longer needs an explicit OAuth2Util object (reddit now
    # requires OAuth and praw manages it), so no manual refresh happens here.
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    return session
def getSubmissionsForRedditSession(redditSession):
    """Fetch every configured signup submission with its full comment tree."""
    submissions = [redditSession.submission(id=sid) for sid in signupPageSubmissionIds]
    for submission in submissions:
        # Expand all "load more comments" stubs so traversal sees everything.
        submission.comments.replace_more(limit=None)
    return submissions
def getCommentsForSubmissions(submissions):
    """Flatten the comment forests of all submissions into one list of Comments."""
    comments = []
    for submission in submissions:
        forest = submission.comments
        # Keep only real Comment objects (exact class match, as before).
        comments.extend(node for node in forest.list()
                        if node.__class__ == praw.models.Comment)
    return comments
def retireCommentHash(commentHash):
    """Append the hash to the retired-hashes file so it is skipped next run."""
    with open("retiredcommenthashes.txt", "a") as retiredFile:
        retiredFile.write("%s\n" % commentHash)
def retiredCommentHashes():
    """Return the list of already-handled comment hashes, one per file line."""
    with open("retiredcommenthashes.txt", "r") as retiredFile:
        return retiredFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
    """Render the moderation page listing all not-yet-handled signup comments."""
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')

    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()

    stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
    stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
    stringio.write('</form>')
    for comment in flat_comments:
        # Hash fullname+body so an edited comment counts as a new one.
        commentHash = sha1()
        commentHash.update(comment.fullname)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)  # can be None if author was deleted.
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(' <small><font color="green">(member)</font></small>')
            else:
                stringio.write(' <small><font color="red">(not a member)</font></small>')
            stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
            stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
            stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
            stringio.write('</form>')
            # Sanitize the markdown-rendered comment body before embedding.
            stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
            stringio.write("\n<br><br>\n\n")

    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(com |
jainpranav/Turing_Machine_Simulator | Simulator/tmbuilder.py | Python | apache-2.0 | 7,293 | 0.009187 | # -*- coding: utf-8 -*-
from tm import TuringMachine
class TuringMachineBuilder:
    """
    Creates a turing machine step by step by retrieving all the necessary
    information.

    The initial state, the halt state and the blank symbol must all be
    provided (via setInitialState / setHaltState / setBlankSymbol) before
    create() is called.
    """
    def __init__(self):
        """
        Initialize a new, empty TuringMachineBuilder.
        """
        # All bookkeeping lives in clean() so a builder can be reused.
        self.clean()

    def clean(self):
        """
        Clear all the previous stored data
        """
        self._states = set()
        self._in_alphabet = set()
        self._trans_function = {}
        self._istate = None
        self._fstates = set()
        self._blank = None
        self._haltstate = None

    def addTransition(self, state, symbol, new_state, new_symbol, movement):
        """
        addTransition(state, symbol, new_state, new_symbol, movement)

        Adds the transition:
            From (state, symbol) To new_state, writing new_symbol at the
            current position and moving the head in movement direction

        - state: something that represents a state, must be hashable
        - symbol: something that represents a symbol, must be hashable
        - new_state: something that represents a state, must be hashable
        - new_symbol: something that represents a symbol, must be hashable
        - movement: TuringMachine.MOVE_LEFT or TuringMachine.MOVE_RIGHT or
                    TuringMachine.NON_MOVEMENT

        Raise Exception if the movement is invalid or a symbol has more
        than one char length
        """
        if movement not in TuringMachine.HEAD_MOVEMENTS:
            raise Exception('Invalid movement')
        # Bug fix: the original tested hasattr(symbol, 'len'), which is never
        # true (the length protocol attribute is '__len__'), so the documented
        # one-char constraint was silently unenforced.
        if (hasattr(symbol, '__len__') and len(symbol) > 1) or \
           (hasattr(new_symbol, '__len__') and len(new_symbol) > 1):
            raise Exception('Symbol length > 1')

        if state not in self._states:
            self._states.add(state)
        # The blank symbol never belongs to the input alphabet.
        if symbol != self._blank and symbol not in self._in_alphabet:
            self._in_alphabet.add(symbol)
        if new_state not in self._states:
            self._states.add(new_state)
        if new_symbol != self._blank and new_symbol not in self._in_alphabet:
            self._in_alphabet.add(new_symbol)

        self._trans_function[(state, symbol)] = (new_state, new_symbol,
                                                 movement)

    def addFinalState(self, state):
        """
        Adds the specified state to the set of final states
        """
        if state not in self._states:
            self._states.add(state)
        if state not in self._fstates:
            self._fstates.add(state)

    def setInitialState(self, state):
        """
        Set the specified state as the initial. Mandatory operation
        """
        if state not in self._states:
            self._states.add(state)
        self._istate = state

    def hasInitialState(self):
        """
        Returns True if the initial state was specified on a previous call
        to setInitialState
        """
        return self._istate is not None

    def hasHaltState(self):
        """
        Returns True if the halt state was specified on a previous call to
        setHaltState
        """
        return self._haltstate is not None

    def hasBlankSymbol(self):
        """
        Returns True if the blank symbol was specified on a previous call to
        setBlankSymbol
        """
        return self._blank is not None

    def setBlankSymbol(self, blank_sym):
        """
        Specifies a new blank symbol
        - The blank symbol must be one char length
        Raise Exception if blank_sym is empty or has more than one char
        """
        if not blank_sym or len(blank_sym) > 1:
            raise Exception('Symbol must be one char length')
        self._blank = blank_sym

    def setHaltState(self, haltstate):
        """
        Specifies a new halt state
        """
        # If there is a previous halt state that no longer appears in any
        # transition, drop it from the set of states.
        if self.hasHaltState():
            old_remains = False
            # Python 2 dict iteration, matching the rest of this file.
            for k, v in self._trans_function.iteritems():
                if k[0] == self._haltstate or v[0] == self._haltstate:
                    old_remains = True
                    break
            if not old_remains:
                self._states.remove(self._haltstate)

        self._haltstate = haltstate
        self._states.add(self._haltstate)

    def create(self):
        """
        Creates a turing machine instance with the collected information.

        Raises an Exception if:
            The initial state remains unset
            The halt state remains unset
            The blank symbol remains unset

        At this point the tape_alphabet is set to be: in_alphabet U {blank}
        """
        if not self.hasInitialState():
            raise Exception('It is necessary to specify an initial state')
        if not self.hasBlankSymbol():
            raise Exception('It is necessary to specify the blank symbol')
        if not self.hasHaltState():
            raise Exception('It is necessary to specify the halt state')

        tape_alphabet = set(self._in_alphabet)
        tape_alphabet.add(self._blank)

        return TuringMachine(self._states, self._in_alphabet, tape_alphabet,
                             self._trans_function, self._istate,
                             self._fstates, self._haltstate, self._blank)

    def getHaltState(self):
        """
        Returns the halt state, or None if it has not been set yet.
        """
        return self._haltstate
# Smoke test: hand-build a small 3-state machine and print it
# (Python 2 print statement, consistent with the rest of this file).
if __name__ == '__main__':
    tmb = TuringMachineBuilder()
    tmb.setBlankSymbol('#')
    tmb.setHaltState('HALT')
    tmb.addTransition(1, 0, 2, 1, TuringMachine.MOVE_RIGHT)
    tmb.addTransition(1, 1, 2, 0, TuringMachine.MOVE_RIGHT)
    tmb.addTransition(2, 0, 1, 0, TuringMachine.NON_MOVEMENT)
    tmb.addTransition(2, 1, 3, 1, TuringMachine.MOVE_RIGHT)
    tmb.addTransition(3, 0, 'HALT', 0, TuringMachine.NON_MOVEMENT)
    tmb.addTransition(3, 1, 'HALT', 1, TuringMachine.NON_MOVEMENT)
    tmb.addTransition(3, '#', 'HALT', '#', TuringMachine.NON_MOVEMENT)
    tmb.setInitialState(1)
    tmb.addFinalState(2)
    print tmb.create()
|
nfomon/shok | parser/shokparser/LexToken.py | Python | gpl-3.0 | 757 | 0.015852 | # Copyright (C) 2013 Michael Biggs. See the COPYING file at the top-level
# directory of this distribution and at http://shok.io/code/copyright.html
# Tokens that come from the Lexer are either pairs or tuples:
# colno:type
# colno:type:value
class LexToken:
colno = 0
ttype = ''
tvalue = ''
def __init__(self, tokenstr):
t = tokenstr.split(':')
if len(t) < 2 or len(t) > 3:
raise Exception("invalid token: %s" % t)
self.colno = t[0]
self.ttype = t[1]
if len(t) == 3:
| self.tvalue = t[2]
def __repr__(self):
if '' == self.tvalue:
return "<%s:%s>" % (self.colno, self.ttype)
else:
| return "<%s:%s:%s>" % (self.colno, self.ttype, self.tvalue)
def NewlineToken():
return LexToken('0:NEWL')
|
ow2-proactive/agent-linux | palinagent/daemon/controller.py | Python | agpl-3.0 | 3,298 | 0.004245 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
#################################################################
#
# ProActive Parallel Suite(TM): The Java(TM) library for
# Parallel, Distributed, Multi-Core Computing for
# Enterprise Grids & Clouds
#
# Copyright (C) 1997-2011 INRIA/University of
# Nice-Sophia Antipolis/ActiveEon
# Contact: proactive@ow2.org or contact@activeeon.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; version 3 of
# the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# If needed, contact us to obtain a release under GPL Version 2 or 3
# or a different license than the AGPL.
#
# Initial developer(s): The ProActive Team
# http://proactive.inria.fr/team_members.htm
# Contributor(s):
#
#################################################################
# $$ACTIVEEON_INITIAL_DEV$$
#################################################################
import logging
import threading
import time
logger = logging.getLogger("agent.ctr")
class Controller(object):
def __init__(self, event_generator):
self.evg = event_generator.getActions()
self.cur_event = None
self.old_event = None
self.must_stop = threading.Event()
def start(self):
while not self.must_stop.isSet():
try:
self.old_event = self.cur_event
self.cur_event = self.evg.next()
if self.old_event is not None:
self.old_event.cancel()
| logger.debug("Controller is scheduling an event -> type:%s start:%s duration:%s" % (self.cur_event.type, self.cur_event.epoch_ | date, self.cur_event.duration))
self.cur_event.schedule()
# FIXME: sleep shouldn't be used. It should be event based !
sleep_time = max(0, int((self.cur_event.epoch_date + self.cur_event.duration) - time.time()))
logger.debug("Controller will sleep for %s seconds" % sleep_time)
self.must_stop.wait(sleep_time)
except StopIteration:
logger.critical("Controller failed to get the next event for the event generator.")
break
except (KeyboardInterrupt, SystemExit):
# Terminate all the processes then exit
logger.info("Daemon exiting...")
break
# Exiting. Clean everything
if self.old_event is not None:
self.old_event.cancel()
if self.cur_event is not None:
self.cur_event.cancel()
def stop(self):
self.must_stop.set()
|
RaoUmer/python-fundamentals | challenges/04-Functions/test_A_checking_again.py | Python | apache-2.0 | 936 | 0.005342 | import pytest
import A_checking_again as A
def test_can_drink():
'''This is a much clearer test now that we're using functions!'''
assert A.can_drink(21) == True
assert A.can_drink(22) == True
assert A.can_drink(20) == False
def test_enforce(capsys):
'''Notice that we're not restricted to the data in our script.'''
A.enforce("John", 18, False)
A.enforce("Chuck", 18, True)
A.enforce("Bob", 21, True)
# Since we specify the operations more tightly, we can be more precise in
# what specific lines should be in our test
out, _ = capsys.readouterr()
lines = out.split('\n')
assert lines[0] == "Checking Chuck!"
assert lines[1] == "T | his isn't right!"
assert lines[2] == "Checking Bob!"
assert lines[3] == "OK!"
assert lines[4] == ""
# Advanced users should note that the above tests co | uld be made even more
# efficient using the "parametrize" decorator from pytest!
|
castelao/CoTeDe | cotede/qctests/bin_spike.py | Python | bsd-3-clause | 2,677 | 0.002988 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
"""
import numpy as np
from numpy import ma
def bin_spike(x, l):
"""
l is the number of points used for comparison, thus l=2 means that each
point will be compared only against the previous and following
measurements. l=2 is is probably not a good choice, t | oo small.
Maybe use pstsd instead?
Dummy way to avoid warnings when x[ini:fin] are all masked.
Improve this in the future.
"""
assert x.ndim == 1, "I'm not ready to deal with multidimensional x"
assert l%2 == 0, "l must be an even integer"
N = len(x)
bin = ma.masked_all(N)
# bin_std = ma.masked_all(N)
half_window = int(l/2)
idx = (i for i in range(half_window, N - half_window) if np.isfinite(x[i]))
for i in idx:
ini = max(0, i - ha | lf_window)
fin = min(N, i + half_window)
# At least 3 valid points
if ma.compressed(x[ini:fin]).size >= 3:
bin[i] = x[i] - ma.median(x[ini:fin])
# bin_std[i] = (np.append(x[ini:i], x[i+1:fin+1])).std()
bin[i] /= (np.append(x[ini:i], x[i+1:fin+1])).std()
return bin
class Bin_Spike(object):
def __init__(self, data, varname, cfg, autoflag=True):
self.data = data
self.varname = varname
self.cfg = cfg
self.set_features()
if autoflag:
self.test()
def keys(self):
return self.features.keys() + \
["flag_%s" % f for f in self.flags.keys()]
def set_features(self):
self.features = {'bin_spike': bin_spike(self.data[self.varname],
self.cfg['l'])}
def test(self):
self.flags = {}
try:
threshold = self.cfg['threshold']
except:
print("Deprecated cfg format. It should contain a threshold item.")
threshold = self.cfg
try:
flag_good = self.cfg['flag_good']
flag_bad = self.cfg['flag_bad']
except:
print("Deprecated cfg format. It should contain flag_good & flag_bad.")
flag_good = 1
flag_bad = 3
assert (np.size(threshold) == 1) and \
(threshold is not None) and \
(np.isfinite(threshold))
flag = np.zeros(self.data[self.varname].shape, dtype='i1')
flag[np.nonzero(self.features['bin_spike'] > threshold)] = flag_bad
flag[np.nonzero(self.features['bin_spike'] <= threshold)] = flag_good
flag[ma.getmaskarray(self.data[self.varname])] = 9
self.flags['bin_spike'] = flag
|
adazey/Muzez | libs/soundcloud/tests/test_client.py | Python | gpl-3.0 | 5,660 | 0 | import soundcloud
from soundcloud.tests.utils import MockResponse
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from nose.tools import eq_, raises
from fudge import patch
def test_kwargs_parsing_valid():
"""Test that valid kwargs are stored as properties on the client."""
client = soundcloud.Client(client_id='foo', client_secret='foo')
assert isinstance(client, soundcloud.Client)
eq_('foo', client.client_id)
client = soundcloud.Client(client_id='foo', client_secret='bar',
access_token='baz', username='you',
password='secret', redirect_uri='foooo')
eq_('foo', client.client_id)
eq_('baz', client.access_token)
@raises(AttributeError)
def test_kwargs_parsing_invalid():
"""Test that unknown kwargs are ignored."""
client = soundcloud.Client(foo='bar', client_id='bar')
client.foo
def test_url_creation():
"""Test that resources are turned into urls properly."""
client = soundcloud.Client(client_id='foo')
url = client._resolve_resource_name('tracks')
eq_('https://api.soundcloud.com/tracks', url)
url = client._resolve_resource_name('/tracks/')
eq_('https://api.soundcloud.com/tracks', url)
def test_url_creation_options():
"""Test that resource resolving works with different options."""
client = soundcloud.Client(client_id='foo', use_ssl=False)
client.host = 'soundcloud.dev'
url = client._resolve_resource_name('apps/132445')
eq_('http://soundcloud.dev/apps/132445', url)
def test_method_dispatching():
"""Test that getattr is doing right by us."""
client = soundcloud.Client(client_id='foo')
for method in ('get', 'post', 'put', 'delete', 'head'):
p = getattr(client, method)
eq_((method,), p.args)
eq_('_request', p.func.__name__)
def test_host_config():
"""We should be able to set the host on the client."""
client = soundcloud.Client(client_id='foo', host='api.soundcloud.dev')
eq_('api.soundcloud.dev', client.host)
client = soundcloud.Client(client_id='foo')
eq_('api.soundcloud.com', client.host)
@patch('requests.get')
def test_disabling_ssl_verification(fake_get):
"""We should be able to disable ssl verification when we are in dev mode"""
client = soundcloud.Client(client_id='foo', host='api.soundcloud.dev',
verify_ssl=False)
expected_url = '%s?%s' % (
client._resolve_resource_name('tracks'),
urlencode({
'limit': 5,
'client_id': 'foo'
}))
headers = {
'User-Agent': soundcloud.USER_AGENT,
'Accept': 'application/json'
}
(fake_get.expects_call()
.with_args(expected_url,
headers=headers,
verify=False,
allow_redirects=True)
.returns(MockResponse("{}")))
client.get('tracks', limit=5)
@raises(AttributeError)
def test_method_dispatching_invalid_method():
"""Test that getattr raises an attributeerror if we give it garbage."""
client = soundcloud.Client(client_id='foo')
client.foo()
@patch('requests.get')
def test_method_dispatching_get_request_readonly(fake_get):
"""Test that calling client.get() results in a proper call
to the get function in the requests module with the provided
kwargs as the querystring.
"""
client = soundcloud.Client(client_id='foo')
expected_url = '%s?%s' % (
client._resolve_resource_name('tracks'),
urlencode({
'limit': 5,
'client_id': 'foo'
}))
headers = {
'User-Agent': soundcloud.USER_AGENT,
'Accept': 'application/json'
}
(fake_get.expects_call()
.with_args(expected_url, headers=headers, allow_redirects=True)
.returns(MockResponse("{}")))
client.get('tracks', limit=5)
@patch('requests.post')
def test_method_dispatching_post_request(fake_post):
"""Test that calling client.post() results in a proper call
to the post function in the requests module.
TODO: Revise once read/write support has been added.
"""
client = soundcloud.Client(client_id='foo')
expected_url = client._resolve_resource_name('tracks')
data = {
'client_id': 'foo'
}
headers = {
'User-Agent': soundcloud.USER_AGENT
}
(fake_post.expects_call()
.with_args(expected_url,
data=data,
headers=headers,
allow_redirects=True)
.returns(MockResponse("{}")))
client.post('tracks')
@patch('requests.get')
def test_proxy_servers(fake_request):
"""Test that providing a dictionary of proxy servers works."""
proxies = {
'http': 'myproxyserver:1234'
}
client = soundcloud.Client(client_id='foo', proxies=proxies)
expected_url = "%s?%s" % (
client._resolve_resource_name('me'),
urlencode({
'client_id': 'foo'
})
)
headers = | {
'User-Agent | ': soundcloud.USER_AGENT,
'Accept': 'application/json'
}
(fake_request.expects_call()
.with_args(expected_url,
headers=headers,
proxies=proxies,
allow_redirects=True)
.returns(MockResponse("{}")))
client.get('/me')
|
rwgdrummer/maskgen | tests/batch/test_batch_converter.py | Python | bsd-3-clause | 772 | 0 | # ===================================================================== | ========
# Authors: PAR Government
# Organizati | on: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
from tests.test_support import TestSupport
from maskgen.scenario_model import ImageProjectModel
from maskgen.batch.batch_journal_conversion import BatchConverter
from maskgen.batch.batch_project import BatchProject
class TestBatchConverter(TestSupport):
def test_converter(self):
model = ImageProjectModel(self.locateFile('images/sample.json'))
converter = BatchConverter(model)
batch = converter.convert()
bp = BatchProject(batch)
bp.saveGraphImage('.')
|
0-wHiTeHand-0/CTFs | made_faqin2k18/heap/flag.py | Python | gpl-2.0 | 413 | 0.031477 | import sys
#Se le pasa la flag deseada, y devuelve lo que hay que escribir en el binario. CUIDADO CON LAS BACKSLASHES; hay que | escaparlas
if len(sys.argv) != 2:
print "Syntax: python2 flag.py <FLAG>"
sys.exit(0)
fl | ag = sys.argv[1]
i = 0
j = len(flag)-1
l = j
flag2 = ""
while (i<l+1):
if i <= l/2:
c = 7
else:
c = 10
flag2 += chr(ord(flag[j])+c)
i = i+1
j = j-1
print flag2
|
CreditEaseDBA/Themis | rule_analysis/rule/obj/table_record_length.py | Python | apache-2.0 | 2,563 | 0.001951 | # -*- coding: utf-8 -*-
def f_get_byte_length(data_type, character_octet_length, numeric_precision, numeric_scale):
case_data_type = {
'tinyint': 1,
'smallint': 2,
'mediumint': 3,
'int': 4,
'integer': 4,
'bigint': 8,
'float': 4,
'double': 8,
'decimal': (numeric_precision + 2 if numeric_precision > numeric_scale else numeric_scale + 2),
'date': 3,
'time': 3,
'year': 1,
'datetime': 8,
'timestamp': 8,
'char': character_octet_length,
'varchar': character_octet_length,
'tinyblob': character_octet_length,
'tinytext': character_octet_length,
'blob': character_octet_length,
'text': character_octet_length,
'mediumblob': character_octet_length,
'mediumtext': character_octet_length,
'longblob': character_octet_length,
'longtext': character_octet_length,
'enum': character_octet_length,
'set': character_octet_length
}
return case_data_type.get(data_type, 0)
def execute_rule(**kwargs):
db_cursor = kwargs.get("db_cursor")
record_length = kwargs.get("record_length")
username = kwargs.get("username")
return_tabs = []
sql = """SELECT table_name,avg_row_length
FROM information_schema.tables
WHERE table_schema = '@username@'"""
sql = sql.replace("@username@", username)
db_cursor.execute(sql)
cur_tables = db_cursor.fetchall()
for rec_tab in cur_tables:
table_name = rec_tab[0]
sql = """
select column_name,data_type,character_octet_length,ifnull(numeric_precision,-1),ifnull(numeric_scale,-1)
from information_schema.columns
where table_schema='@username@' and
table_name='@table_name@'
"""
sql = | sql.replace("@table_name@", table_name).replace("@username@", username)
db_cursor.execute(sql)
cur_columns = db_cursor.fetchall()
tmp_column_length = 0
for rec_col in cur_columns:
data_type = rec_col[1]
character_octet_length = rec_col[2]
numeric_precision = rec_col[3]
numeric_scale = re | c_col[4]
if tmp_column_length:
tmp_column_length = tmp_column_length + f_get_byte_length(data_type, character_octet_length, numeric_precision, numeric_scale)
if tmp_column_length > record_length:
return_tabs.append([rec_tab[0], rec_tab[1], tmp_column_length])
return return_tabs, True
|
jose36/plugin.video.live.ProyectoLuzDigital- | servers/adfly.py | Python | gpl-2.0 | 2,142 | 0.021942 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para adfly (acortador de url)
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_long_url( short_url ):
logger.info("servers.adfly get_long_url(short_url='%s')" % short_url)
request_headers = []
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"])
request_headers.append(["Referer","http://linkdecrypter.com"])
post=urllib.urlencode({"pro_links":short_url,"modo_links":"text","modo_recursivo":"on","link_cache":"on"})
url = "http://linkdecrypter.com/"
|
# Parche porque python no parece reconocer bien la cabecera phpsessid
body,response_headers = scrapertools.read_body_and_headers(url,post=post,headers=request_headers)
n = 1
while True:
for name,value in response_headers:
if name=="set-cookie":
logger.info("S | et-Cookie: "+value)
cookie_name = scrapertools.get_match(value,'(.*?)\=.*?\;')
cookie_value = scrapertools.get_match(value,'.*?\=(.*?)\;')
request_headers.append(["Cookie",cookie_name+"="+cookie_value])
body,response_headers = scrapertools.read_body_and_headers(url,headers=request_headers)
logger.info("body="+body)
try:
location = scrapertools.get_match(body,'<textarea.*?class="caja_des">([^<]+)</textarea>')
logger.info("location="+location)
break
except:
n = n + 1
if n>3:
break
return location
def test():
location = get_long_url("http://85093635.linkbucks.com/")
ok = ("freakshare.com" in location)
#if ok:
# location = get_long_url("http://adf.ly/Fp6BF")
# ok = "http://vk.com/" in location
print "Funciona:",ok
return ok |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckanext/multilingual/tests/test_multilingual_plugin.py | Python | gpl-3.0 | 8,793 | 0.000114 | # encoding: utf-8
import ckan.plugins
import ckanext.multilingual.plugin as mulilingual_plugin
import ckan.lib.helpers
import ckan.lib.create_test_data
import ckan.logic.action.update
import ckan.model as model
import ckan.tests.legacy
import ckan.tests.legacy.html_check
import routes
import paste.fixture
import pylons.test
_create_test_data = ckan.lib.create_test_data
class TestDatasetTermTranslation(ckan.tests.legacy.html_check.HtmlCheckMethods):
'Test the translation of datasets by the multilingual_dataset plugin.'
@classmethod
def setup(cls):
cls.app = paste.fixture.TestApp(pylons.test.pylonsapp)
ckan.plugins.load('multilingual_dataset')
ckan.plugins.load('multilingual_group')
ckan.plugins.load('multilingual_tag')
ckan.tests.legacy.setup_test_search_index()
_create_test_data.CreateTestData.create_translations_test_data()
cls.sysadmin_user = model.User.get('testsysadmin')
cls.org = {'name': 'test_org',
| 'title' | : 'russian',
'description': 'Roger likes these books.'}
ckan.tests.legacy.call_action_api(cls.app, 'organization_create',
apikey=cls.sysadmin_user.apikey,
**cls.org)
dataset = {'name': 'test_org_dataset',
'title': 'A Novel By Tolstoy',
'owner_org': cls.org['name']}
ckan.tests.legacy.call_action_api(cls.app, 'package_create',
apikey=cls.sysadmin_user.apikey,
**dataset)
# Add translation terms that match a couple of group names and package
# names. Group names and package names should _not_ get translated even
# if there are terms matching them, because they are used to form URLs.
for term in ('roger', 'david', 'annakarenina', 'warandpeace'):
for lang_code in ('en', 'de', 'fr'):
data_dict = {'term': term,
'term_translation': 'this should not be rendered',
'lang_code': lang_code}
context = {'model': ckan.model,
'session': ckan.model.Session,
'user': 'testsysadmin'}
ckan.logic.action.update.term_translation_update(
context, data_dict)
@classmethod
def teardown(cls):
ckan.plugins.unload('multilingual_dataset')
ckan.plugins.unload('multilingual_group')
ckan.plugins.unload('multilingual_tag')
ckan.model.repo.rebuild_db()
ckan.lib.search.clear_all()
def test_user_read_translation(self):
'''Test the translation of datasets on user view pages by the
multilingual_dataset plugin.
'''
# It is testsysadmin who created the dataset, so testsysadmin whom
# we'd expect to see the datasets for.
for user_name in ('testsysadmin',):
offset = routes.url_for(
controller='user', action='read', id=user_name)
for (lang_code, translations) in (
('de', _create_test_data.german_translations),
('fr', _create_test_data.french_translations),
('en', _create_test_data.english_translations),
('pl', {})):
response = self.app.get(
offset,
status=200,
extra_environ={'CKAN_LANG': lang_code,
'CKAN_CURRENT_URL': offset})
terms = ('A Novel By Tolstoy')
for term in terms:
if term in translations:
assert translations[term] in response, response
elif term in _create_test_data.english_translations:
assert (_create_test_data.english_translations[term]
in response)
else:
assert term in response
assert 'this should not be rendered' not in response
def test_org_read_translation(self):
for (lang_code, translations) in (
('de', _create_test_data.german_translations),
('fr', _create_test_data.french_translations),
('en', _create_test_data.english_translations),
('pl', {})):
offset = '/{0}/organization/{1}'.format(
lang_code, self.org['name'])
response = self.app.get(offset, status=200)
terms = ('A Novel By Tolstoy',
'russian',
'Roger likes these books.')
for term in terms:
if term in translations:
assert translations[term] in response
elif term in _create_test_data.english_translations:
assert (_create_test_data.english_translations[term]
in response)
else:
assert term in response
assert 'this should not be rendered' not in response
def test_org_index_translation(self):
for (lang_code, translations) in (
('de', _create_test_data.german_translations),
('fr', _create_test_data.french_translations),
('en', _create_test_data.english_translations),
('pl', {})):
offset = '/{0}/organization'.format(lang_code)
response = self.app.get(offset, status=200)
for term in ('russian', 'Roger likes these books.'):
if term in translations:
assert translations[term] in response
elif term in _create_test_data.english_translations:
assert (_create_test_data.english_translations[term]
in response)
else:
assert term in response, response
assert ('/{0}/organization/{1}'.format(lang_code, self.org['name'])
in response)
assert 'this should not be rendered' not in response
class TestDatasetSearchIndex():
@classmethod
def setup_class(cls):
ckan.plugins.load('multilingual_dataset')
ckan.plugins.load('multilingual_group')
data_dicts = [
{'term': 'moo',
'term_translation': 'french_moo',
'lang_code': 'fr'},
{'term': 'moo',
'term_translation': 'this should not be rendered',
'lang_code': 'fsdas'},
{'term': 'an interesting note',
'term_translation': 'french note',
'lang_code': 'fr'},
{'term': 'moon',
'term_translation': 'french moon',
'lang_code': 'fr'},
{'term': 'boon',
'term_translation': 'french boon',
'lang_code': 'fr'},
{'term': 'boon',
'term_translation': 'italian boon',
'lang_code': 'it'},
{'term': 'david',
'term_translation': 'french david',
'lang_code': 'fr'},
{'term': 'david',
'term_translation': 'italian david',
'lang_code': 'it'}
]
context = {
'model': ckan.model,
'session': ckan.model.Session,
'user': 'testsysadmin',
'ignore_auth': True,
}
for data_dict in data_dicts:
ckan.logic.action.update.term_translation_update(
context, data_dict)
@classmethod
def teardown(cls):
ckan.plugins.unload('multilingual_dataset')
ckan.plugins.unload('multilingual_group')
def test_translate_terms(self):
sample_index_data = {
'download_url': u'moo',
'notes': u'an interesting note',
'tags': [u'moon', 'boon'],
'title': u'david',
}
result = mulilingual_plugin.MultilingualDataset().before_index(
sample_index_data)
assert result == {
|
SimonHerrera/rock-island-independents | api/rii_Api/migrations/0002_auto_20160912_2314.py | Python | mit | 1,025 | 0.000976 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-13 04:14
from __future__ import unicode_literals
from django.db import m | igrations, models
class Migration(migrations.Migration):
| dependencies = [
('rii_Api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='location',
name='state',
field=models.CharField(max_length=2),
),
migrations.AlterField(
model_name='location',
name='venueName',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='year',
name='year',
field=models.IntegerField(choices=[(1919, 1919), (1920, 1920), (1921, 1921), (1922, 1922), (1923, 1923), (1924, 1924), (1925, 1925)], default=1919),
),
migrations.AlterField(
model_name='year',
name='yearSummary',
field=models.TextField(default='', max_length=2000),
),
]
|
khanhduong95/Open-Tux-World | scripts/fall.py | Python | gpl-3.0 | 1,747 | 0.004579 | #
# Copyright (C) 2016 Dang Duong
#
# This file is part of Open Tux World.
#
# Open Tux World is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Open Tux World is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Open Tux World. If not, see <http://www.gnu.org/licenses/>.
#
from scripts import common
from mathutils import Vector
logic = common.logic
def main(cont):
own = cont.owner
own.applyForce([0, 0, -10 * own.mass], False)
if own["health"] < 1:
return
own["hit"] = False
own.enableRigidBod | y()
v = Vector((own["v_x"], own["v_y"], own["v_z"]))
dv = Vector(own.worldLinearVelocity) - v
v += dv
speed = common.getDistance([dv.x, dv.y, dv.z])
own["v_x"] = v. | x
own["v_y"] = v.y
own["v_z"] = v.z
if speed > common.DANGER_SPEED:
if speed > common.FATAL_SPEED:
own["health"] = 0
else:
own["health"] -= speed * (common.HIGH_DAMAGE_RATE if speed > common.HIGH_DANGER_SPEED else common.DAMAGE_RATE)
own.state = logic.KX_STATE3
elif speed < common.RIGID_SPEED and (cont.sensors["Collision.001"].positive or not own["fall"]):
own.disableRigidBody()
own.worldOrientation[2] = [0.0,0.0,1.0]
own.state = logic.KX_STATE2
|
rtfd/recommonmark | tests/sphinx_custom_md/conf.py | Python | mit | 794 | 0.003778 |
# -*- coding: utf-8 -*-
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
templates_path = ['_templates']
source_suffix = '.markdown'
source_parsers = { '.markdown': CommonMarkParser }
master_doc = 'index'
project = u'sphinxproj'
copyright = u'2015, rtfd'
author = u'rtfd'
version = '0.1'
release = '0.1'
language = None
exclude_patterns = ['_build']
highlight_language = 'python'
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'alabaster'
html_static_p | ath = ['_static']
htmlhelp_ba | sename = 'sphinxproj'
def setup(app):
app.add_config_value('recommonmark_config', {
'enable_eval_rst': True,
'commonmark_suffixes': ['.markdown', '.hpp'],
}, True)
app.add_transform(AutoStructify)
|
scieloorg/opac | opac/tests/test_interface_TOC.py | Python | bsd-2-clause | 11,514 | 0.000961 | # coding: utf-8
import flask
from flask import url_for
from .base import BaseTestCase
from . import utils
class TOCTestCase(BaseTestCase):
# TOC
def test_the_title_of_the_article_list_when_language_pt(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma Português.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Criando uma coleção para termos o objeto ``g`` na interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='pt_BR')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'pt_BR')
self.assertIn("Artigo Com Título Em Português", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_es(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma Espanhol.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Criando uma coleção para termos o objeto ``g`` na interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Título Del Artículo En Portugués",
response.data.decode('utf-8'))
def test_the_title_of_the_article_list_when_language_en(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma Inglês.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Criando uma coleção para termos o objeto ``g`` na interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Title In Portuguese", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_translated(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma original quando não tem idioma.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Criando uma coleção para termos o objeto ``g`` na interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='en')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'en')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_without_unknow_language_for_article(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma original quando não conhece o idioma.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Criando uma coleção para termos o objeto ``g`` na | interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = []
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
header = {
'Referer': url_for(
| 'main.issue_toc',
url_seg=journal.url_segment,
url_seg_issue=issue.url_segment)
}
set_locale_url = url_for('main.set_locale', lang_code='es')
response = c.get(set_locale_url, headers=header, follow_redirects=True)
self.assertEqual(200, response.status_code)
self.assertEqual(flask.session['lang'], 'es')
self.assertIn("Article Y", response.data.decode('utf-8'))
def test_the_title_of_the_article_list_with_and_without_translated(self):
"""
Teste para verificar se a interface do TOC esta retornando o título no
idioma original para artigos que não tem tradução e o título traduzido
quando tem tradução do título.
"""
journal = utils.makeOneJournal()
with self.client as c:
# Criando uma coleção para termos o objeto ``g`` na interface
utils.makeOneCollection()
issue = utils.makeOneIssue({'journal': journal})
translated_titles = [
{'name': "Artigo Com Título Em Português", 'language': 'pt'},
{'name': "Título Del Artículo En Portugués", 'language': 'es'},
{'name': "Article Title In Portuguese", 'language': 'en'}
]
utils.makeOneArticle({
'issue': issue,
'title': 'Article Y',
'translated_titles': translated_titles
})
utils.makeOneArticle({
'issue': issue,
|
lzhang10/maxent | python/setup.py | Python | lgpl-2.1 | 2,832 | 0.016949 | #!/bin/env python
# This is the building script for Python maxent extension module.
# Simply type "python setup.py build" at command line to build the extension.
# After that you can type "python setup.py install" to install the extension
# module.
#
# The script assume you use gcc on unix and msvc on win32 platform.
from sys import platform, exec_prefix
from distutils.core import setup, Extension
# change the lines below according to your boost location
if platform == "win32":
libmaxent_name = 'libmaxent'
extra_compile_args = [
"-DWIN32",
"-DPYTHON_MODULE",
"-DHAVE_FORTRAN=1",
"-DBOOST_DISABLE_THREADS",
"-DBOOST_DISABLE_ASSERTS",
"/GR",
]
data_files = [('Lib/site-packages/maxent' ,
['stlport_vc7146.dll',
'libifcoremd.dll',
'libmmd.dll']),
]
opt_lib = []
else: # unix
libmaxent_name = 'maxent'
extra_compile_args = [
"-DNDEBUG",
"-DPYTHON_MODULE",
"-DBOOST_DISABLE_THREADS",
]
data_files = []
# various options detected from running ../configure
opt_lib = []
opt_lib_path = []
ac_cv_lib_z_main = "@ac_cv_lib_z_main@"
if ac_cv_lib_z_main == 'yes':
opt_lib.append('z')
fclibs = "/usr/lib/x86_64-linux-gnu/libboost_chrono.a"
opt_lib_path.append("/usr/lib/x86_64-linux-gnu/")
opt_lib.append('boost_chrono')
opt_lib.append('boost_timer')
# if fclibs != '':
# for s in fclibs.split():
# if s[:2] == '-L':
# opt_lib_path.append(s[2:])
# elif s[:2] == '-l':
# opt_lib.append(s[2:])
# else:
# raise 'unknow FCLIBS item: %s' % s
setup(name = "maxent",
version = "version-devel",
author = "Le Zhang",
author_email = "ejoy@users.sourceforge.net",
url = "http://homepages.inf.ed.ac.uk/lzhang10/maxent_toolkit.html",
description = "A Maximum E | ntropy Modeling toolkit in python",
long_description = """Max | ent is a powerful, flexible, and easy-to-use
Maximum Entropy Modeling library for Python. The core engine is written in C++
with speed and portability in mind.
The win32 version of this module was compiled with MSVC7.1, Intel Fortran 8.0,
STLPort 4.6.
""",
license = "LGPL",
packages = ['maxent'],
ext_modules=[
Extension("maxent._cmaxent",
["maxent_wrap.cxx"],
include_dirs=[
"../src",
],
library_dirs=[
"../build/src",
] + opt_lib_path,
libraries = [libmaxent_name] + opt_lib,
extra_compile_args = extra_compile_args,
)
],
data_files = data_files,
)
|
smallswan267/octoplus | selenium/webdriver/chrome/webdriver.py | Python | mit | 3,175 | 0.00315 | #!/usr/bin/python
#
# Copyright 2011-2013 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=0,
chrome_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- chrome_options: this takes an instance of ChromeOptions
"""
if chrome_options is None:
# desired_capabilities stays as passed in
if desired_capabilities is None:
desired_capabilities = Options().to_capabilities()
else:
if desired_capabilities is None:
desired_capabilities = chrome_options.to_capabilities()
else:
desired_capabilities.update(chrome_options.to_capabilities())
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capa | bilities=desired_capabilities,
keep_alive=True)
except:
self.quit()
raise
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the ChromeDriver executable
that is started when starting the ChromeDriver
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
fina | lly:
self.service.stop()
|
algorhythms/LintCode | Segment Tree Query II.py | Python | apache-2.0 | 1,529 | 0.003924 | """
For an array, we can build a SegmentTree for it, each node stores an extra attribute count to denote the number of
elements in the the array which value is between interval start and end. (The array may not fully filled by elements)
Design a query method with three parameters root, start and end, find the number of elements in the in array's interval
[start, end] by the given root of value SegmentTree.
Have you met this question in a real interview? Yes
Example
For array [0, empty, 2, 3], the corresponding value Segment Tree is:
[0, 3, count=3]
/ \
[0,1,count=1] [2,3,count=2]
/ \ / \
[0,0,count=1] [1,1,count=0] [2,2,count=1], [3,3,count=1]
query(1, 1), return 0
query(1, 2), return 1
query(2, 3), return 2
query(0, 2), return 2
"""
__author__ = 'Daniel'
DEFAULT = 0
f = lambda x, y: x+y
class Solution:
def query(self, root, s, e):
"""
Segment: [s, e]
:param root: The root of segment tree
:param start: start of segment/interval
:param end: end of segment/interval
:return: The count number in the interval [start, end]
"""
if not root:
return DEFAULT
| if s <= root.start and e >= root.end:
return root.count
if s > root.end or e < root.start:
return DEFAULT
l = self.query(root.left, s, e)
| r = self.query(root.right, s, e)
return f(l, r)
|
prisonersDilemma/orionscripts | whois/whois/tests/__init__.py | Python | gpl-2.0 | 65 | 0.015385 | #!/usr/bin/env python3.6
if __name__ | == '__main__':
| pass
|
anhstudios/swganh | data/scripts/templates/object/static/structure/general/shared_prp_junk_s4.py | Python | mit | 449 | 0.046771 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object i | mport *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_prp_junk_s4.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
| #### END MODIFICATIONS ####
return result |
OCA/manufacture | mrp_production_serial_matrix/wizards/mrp_production_serial_matrix.py | Python | agpl-3.0 | 13,579 | 0.001178 | # Copyright 2021 ForgeFlow S.L. (https://www.forgeflow.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
from odoo.tools.float_utils import float_compare, float_is_zero
class MrpProductionSerialMatrix(models.TransientModel):
_name = "mrp.production.serial.matrix"
_description = "Mrp Production Serial Matrix"
production_id = fields.Many2one(
comodel_name="mrp.production",
string="Manufacturing Order",
readonly=True,
)
product_id = fields.Many2one(
related="production_id.product_id",
readonly=True,
)
company_id = fields.Many2one(
related="production_id.company_id",
readonly=True,
)
finished_lot_ids = fields.Many2many(
string="Finished Product Serial Numbers",
comodel_name="stock.production.lot",
domain="[('product_id', '=', product_id)]",
)
line_ids = fields.One2many(
string="Matrix Cell",
comodel_name="mrp.production.serial.matrix.line",
inverse_name="wizard_id",
)
lot_selection_warning_msg = fields.Char(compute="_compute_lot_selection_warning")
lot_selection_warning_ids = fields.Many2many(
comodel_name="stock.production.lot", compute="_compute_lot_selection_warning"
)
lot_selection_warning_count = fields.Integer(
compute="_compute_lot_selection_warning"
)
@api.depends("line_ids", "line_ids.component_lot_id")
def _compute_lot_selection_warning(self):
for rec in self:
warning_lots = self.env["stock.production.lot"]
warning_msgs = []
# Serials:
serial_lines = rec.line_ids.filtered(
lambda l: l.component_id.tracking == "serial"
)
serial_counter = {}
for sl in serial_lines:
if not sl.component_lot_id:
continue
serial_counter.setdefault(sl.component_lot_id, 0)
serial_counter[sl.component_lot_id] += 1
for lot, counter in serial_counter.items():
if counter > 1:
warning_lots += lot
warning_msgs.append(
"Serial number %s selected several times" % lot.name
)
# Lots
lot_lines = rec.line_ids.filtered(
lambda l: l.component_id.tracking == "lot"
)
lot_consumption = {}
for ll in lot_lines:
if not ll.component_lot_id:
continue
lot_consumption.setdefault(ll.component_lot_id, 0)
free_qty, reserved_qty = ll._get_available_and_reserved_quantities()
available_quantity = free_qty + reserved_qty
if (
available_quantity - lot_consumption[ll.component_lot_id]
< ll.lot_qty
):
warning_lots += ll.component_lot_id
warning_msgs.append(
"Lot %s not available at the needed qty (%s/%s)"
% (ll.component_lot_id.name, available_quantity, ll.lot_qty)
)
lot_consumption[ll.component_lot_id] += ll.lot_qty
not_filled_lines = rec.line_ids.filtered(
lambda l: l.finished_lot_id and not l.component_lot_id
)
if not_filled_lines:
not_filled_finshed_lots = not_filled_lines.mapped("finished_lot_id")
warning_lots += not_filled_finshed_lots
warning_msgs.append(
"Some cells are not filled for some finished serial number (%s)"
% ", ".join(not_filled_finshed_lots.mapped("name"))
)
rec.lot_selection_warning_msg = ", ".join(warning_msgs)
rec.lot_selection_warning_ids = warning_lots
rec.lot_selection_warning_count = len(warning_lots)
@api.model
def default_get(self, fields):
res = super().default_get(fields)
production_id = self.env.context["active_id"]
active_model = self.env.context["active_model"]
if not production_id:
return res
assert active_model == "mrp.production", "Bad context propagation"
production = self.env["mrp.production"].browse(production_id)
if not production.show_serial_matrix:
raise UserError(
_("The finished product of this MO is not tracked by serial numbers.")
)
finished_lots = self.env["stock.production.lot"]
if production.lot_producing_id:
finished_lots = production.lot_producing_id
matrix_lines = self._get_matrix_lines(production, finished_lots)
res.update(
{
"line_ids": [(0, 0, x) for x in matrix_lines],
"production_id": production_id,
"finished_lot_ids": [(4, lot_id, 0) for lot_id in finished_lots.ids],
}
)
return res
def _get_matrix_lines(self, production, finished_lots):
tracked_components = []
for line in production.bom_id.bom_line_ids:
if line.product_id.tracking == "serial":
# TODO: factor if parent is not 1.
# TODO: uom.
for i in range(1, int(line.product_qty) + 1):
tracked_components.append((line.product_id, i, 1))
elif line.product_id.tracking == "lot":
tracked_components.append((line.product_id, 0, line.product_qty))
matrix_lines = []
current_lot = False
new_lot_number = 0
for _i in range(int(production.product_qty)):
if finished_lots:
current_lot = finished_lots[0]
else:
new_lot_number += 1
for component_tuple in tracked_components:
line = self._prepare_matrix_line(
component_tuple, finished_lot=current_lot, number=new_lot_number
)
matrix_lines.append(line)
if current_lot:
finished_lots -= current_lot
current_lot = False
return matrix_lines
def _prepare_matrix_line(self, component_tuple, finished_lot=None, number=None):
component, lot_no, lot_qty = component_tuple
column_name = component.display_name
if lot_no > 0:
column_name += " (%s)" % lot_no
res = {
"component_id": component.id,
"component_column_name": column_name,
"lot_qty": lot_qty,
}
if finished_lot:
if isinstance(finished_lot.id, models.NewId):
# NewId instances are not handled correctly later, this is a
# small workaround. In future versions it might not be needed.
lot_id = finished_lot.id.origin
else:
lot_id = finished_lot.id
res.update(
{
"finished_lot_id": lot_id,
"finished_lot_name": f | inished_lot.name,
}
)
e | lif isinstance(number, int):
res.update(
{
"finished_lot_name": _("(New Lot %s)") % number,
}
)
return res
@api.onchange("finished_lot_ids")
def _onchange_finished_lot_ids(self):
for rec in self:
matrix_lines = self._get_matrix_lines(
rec.production_id,
rec.finished_lot_ids,
)
rec.line_ids = False
rec.write({"line_ids": [(0, 0, x) for x in matrix_lines]})
def button_validate(self):
self.ensure_one()
if self.lot_selection_warning_count > 0:
raise UserError(
_("Some issues has been detected in your selection: %s")
% self.lot_selection_warning_msg
)
mos = self.env["mrp.production"]
current_mo = sel |
ryfeus/lambda-packs | Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/rio/rasterize.py | Python | mit | 10,025 | 0.0002 | import json
import logging
from math import ceil
import os
import click
import cligj
from .helpers import resolve_inout
from . import options
import rasterio
from rasterio.errors import CRSError
from rasterio.transform import Affine
from rasterio.coords import disjoint_bounds
logger = logging.getLogger('rio')
# Common options used below
# Unlike the version in cligj, this one doesn't require values.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
metavar="INPUTS... OUTPUT")
@click.command(short_help='Rasterize features.')
@files_inout_arg
@options.output_opt
@cligj.format_opt
@options.like_file_opt
@options.bounds_opt
@options.dimensions_opt
@options.resolution_opt
@click.option('--src-crs', '--src_crs', 'src_crs', default=None,
help='Source coordinate reference system. Limited to EPSG '
'codes for now. Used as output coordinate system if output '
'does not exist or --like option is not used. '
'Default: EPSG:4326')
@options.all_touched_opt
@click.option('--default-value', '--default_value', 'default_value',
type=float, default=1, help='Default value for rasterized pixels')
@click.option('--fill', type=float, default=0,
help='Fill value for all pixels not overlapping features. Will '
'be evaluated as NoData pixels for output. Default: 0')
@click.option('--property', 'prop', type=str, default=None, help='Property in '
'GeoJSON features to use for rasterized values. Any features '
'that lack this property will be given --default_value instead.')
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def rasteriz | e(
ctx,
files,
output,
driver,
like,
bounds,
dimensions,
res,
src_crs,
all_touched,
default_value,
fill,
| prop,
force_overwrite,
creation_options):
"""Rasterize GeoJSON into a new or existing raster.
If the output raster exists, rio-rasterize will rasterize feature values
into all bands of that raster. The GeoJSON is assumed to be in the same
coordinate reference system as the output unless --src-crs is provided.
--default_value or property values when using --property must be using a
data type valid for the data type of that raster.
If a template raster is provided using the --like option, the affine
transform and data type from that raster will be used to create the output.
Only a single band will be output.
The GeoJSON is assumed to be in the same coordinate reference system unless
--src-crs is provided.
--default_value or property values when using --property must be using a
data type valid for the data type of that raster.
--driver, --bounds, --dimensions, and --res are ignored when output exists
or --like raster is provided
If the output does not exist and --like raster is not provided, the input
GeoJSON will be used to determine the bounds of the output unless
provided using --bounds.
--dimensions or --res are required in this case.
If --res is provided, the bottom and right coordinates of bounds are
ignored.
Note:
The GeoJSON is not projected to match the coordinate reference system
of the output or --like rasters at this time. This functionality may be
added in the future.
"""
from rasterio.crs import CRS
from rasterio.features import rasterize
from rasterio.features import bounds as calculate_bounds
verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
output, files = resolve_inout(
files=files, output=output, force_overwrite=force_overwrite)
bad_param = click.BadParameter('invalid CRS. Must be an EPSG code.',
ctx, param=src_crs, param_hint='--src_crs')
has_src_crs = src_crs is not None
try:
src_crs = CRS.from_string(src_crs) if has_src_crs else CRS.from_string('EPSG:4326')
except CRSError:
raise bad_param
# If values are actually meant to be integers, we need to cast them
# as such or rasterize creates floating point outputs
if default_value == int(default_value):
default_value = int(default_value)
if fill == int(fill):
fill = int(fill)
with rasterio.Env(CPL_DEBUG=verbosity > 2):
def feature_value(feature):
if prop and 'properties' in feature:
return feature['properties'].get(prop, default_value)
return default_value
with click.open_file(files.pop(0) if files else '-') as gj_f:
geojson = json.loads(gj_f.read())
if 'features' in geojson:
geometries = []
for f in geojson['features']:
geometries.append((f['geometry'], feature_value(f)))
elif 'geometry' in geojson:
geometries = ((geojson['geometry'], feature_value(geojson)), )
else:
raise click.BadParameter('Invalid GeoJSON', param=input,
param_hint='input')
geojson_bounds = geojson.get('bbox', calculate_bounds(geojson))
if os.path.exists(output):
with rasterio.open(output, 'r+') as out:
if has_src_crs and src_crs != out.crs:
raise click.BadParameter('GeoJSON does not match crs of '
'existing output raster',
param='input', param_hint='input')
if disjoint_bounds(geojson_bounds, out.bounds):
click.echo("GeoJSON outside bounds of existing output "
"raster. Are they in different coordinate "
"reference systems?",
err=True)
meta = out.meta.copy()
result = rasterize(
geometries,
out_shape=(meta['height'], meta['width']),
transform=meta.get('affine', meta['transform']),
all_touched=all_touched,
dtype=meta.get('dtype', None),
default_value=default_value,
fill=fill)
for bidx in range(1, meta['count'] + 1):
data = out.read(bidx, masked=True)
# Burn in any non-fill pixels, and update mask accordingly
ne = result != fill
data[ne] = result[ne]
data.mask[ne] = False
out.write(data, indexes=bidx)
else:
if like is not None:
template_ds = rasterio.open(like)
if has_src_crs and src_crs != template_ds.crs:
raise click.BadParameter('GeoJSON does not match crs of '
'--like raster',
param='input', param_hint='input')
if disjoint_bounds(geojson_bounds, template_ds.bounds):
click.echo("GeoJSON outside bounds of --like raster. "
"Are they in different coordinate reference "
"systems?",
err=True)
kwargs = template_ds.meta.copy()
kwargs['count'] = 1
# DEPRECATED
# upgrade transform to affine object or we may get an invalid
# transform set on output
kwargs['transform'] = template_ds.affine
template_ds.close()
else:
bounds = bounds or geojson_bounds
if src_crs.is_geographic:
if (bounds[0] < -180 or bounds[2] > 180 or
bounds[1] < -80 or bounds[3] > 80):
raise click.BadParameter(
"Bounds are beyond the valid extent for "
"EPSG:4326.",
|
marrow/web.db | test/test_sqlalchemy.py | Python | mit | 1,437 | 0.03897 | # encoding: utf-8
from __future__ import unicode_literals
import pytest
from web.core.context import Context
from web.db.sa import SQLAlchemyConnection
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
name = Column(String)
class Address(Base):
__tablename__ = 'address'
id = Column(Integer, primary_key=True)
address = Column(String)
person_id = Column(Integer, ForeignKey(Person.id))
person = relationship(Person)
@pytest.fixture
def sa():
ctx = Context(db=Context())
sa = SQLAlchemyConnection('sqlite://', 'test')
sa.start(ctx)
engine = ctx.db.test
sa.prepare(ctx)
session = ctx.db.test
yield engine, session
sa.done(ctx)
sa.stop(ctx)
| class TestSQLAlchemy(object):
def test_lifecycle(self, sa):
pass
def test_repr(self):
sa = SQLAlchemyConnection | ('sqlite://', 'test')
assert repr(sa) == 'SQLAlchemyConnection(test, "sqlite://")'
def test_use(self, sa):
engine, session = sa
Base.metadata.create_all(engine)
p = Person(name='person')
session.add(p)
a = Address(address='address', person=p)
session.add(a)
session.commit()
p = session.query(Person).filter(Person.name == 'person').one()
assert isinstance(p, Person)
assert p.id == 1
|
dcooper2/Swanson_Bot | swanson_bot.py | Python | mit | 2,143 | 0.005133 | #!/usr/bin/python
import praw
import re
import os
import pickle
from array import *
import random
#REPLY = "I want all the bacon and eggs you have."
REPLY = array('i',["I want all the bacon and eggs you have", "I know what I'm about son", "I'm not interested in caring about people", "Is this not rap?"])
if not os.path.isfile("inigo_config.txt"):
print "You must create the file swanson_config.txt with the pickled credentials."
exit(1)
else:
print "Loading credentials"
user_data = pickle.load( open("swanson_config.txt","rb"))
#print user_data
user_agent = ("Swanson bot 0.1 created by /u/dcooper2.")
r = praw.Reddit(user_agent=user_agent)
r.login(user_data[0], user_data[1])
del user_data
print "Successfully logged in"
# Check for previous replies
if not os.path.isfile("replies.txt"):
replies = []
else:
print "Loading previous reply ids"
with open("replies.txt", "r") as f:
replies = f.read()
replies = replies.split("\n")
replies = filter(None, replies)
# Check for new items to reply to
subreddit = r.get_subreddit('umw_cpsc470Z')
print "Checking for new posts"
for submission in subreddit.get_hot(limit=10):
print "Checking submission ", submission.id
if submission.id not in replies:
if re.search("Ron Swanson", submission.title, re.IGNORECASE) or re.search("Ron Swanson", submission.selftext, re.IGNORECASE):
| x = random.randi | nt(0,3)
submission.add_comment(REPLY[x])
print "Bot replying to submission: ", submission.id
replies.append(submission.id)
print "Checking comments"
flat_comments = praw.helpers.flatten_tree(submission.comments)
for comment in flat_comments:
if comment.id not in replies:
if re.search("Ron Swanson", comment.body, re.IGNORECASE):
y = random.randint(0,3)
print "Bot replying to comment: ", comment.id
comment.reply(REPLY[y])
replies.append(comment.id)
# Save new replies
print "Saving ids to file"
with open("replies.txt", "w") as f:
for i in replies:
f.write(i + "\n")
|
kako-nawao/django-group-by | test_app/query.py | Python | mit | 141 | 0 | from django_group_by import GroupByMixin
from django.db.models.query import QuerySet
class BookQuerySet(QuerySet, Group | ByMixin):
pass
| |
mhmurray/cloaca | cloaca/test/tests.py | Python | mit | 3,034 | 0.004285 | #!/usr/bin/env python
import unittest
import logging
import logging.config
import sys
import argparse
DESCRIPTION="""
Harness for tests in the cloaca/tests/ directory.
Run all tests with '--all' or provide a list dotted names
of specific tests (eg. legionary.TestLegionary.test_legionary).
"""
# Set up logging. See logging.json for config
def setup_logging(
default_path='test_logging.json',
default_level=logging.INFO):
"""Setup logging configuration
"""
import sys, os, json
path = default_path
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
def main():
parser = argparse.ArgumentParser(
description=DESCRIPTION, |
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--all', action='store_true',
help='Run all tests instead of matching pattern.')
parser.add_argument('pattern', nargs='*',
help=('pattern(s) to match against, eg. "buildings" or '
'"architect.TestArchitect.test_lead_architect".'))
parser.add_argument('-v', '--verbose', action='store_true' | ,
help='Use verbose test result reporting.')
parser.add_argument('-q', '--quiet', action='store_true',
help=('Suppress individual test result reporting. Still reports '
'summary information. Overrides --verbose.'))
parser.add_argument('--log-level', default='WARNING',
help=('Set app log level during tests. Valid arguments are: '
'DEBUG, INFO, WARNING, ERROR, CRITICAL. See logging module '
'documentation.'))
args = parser.parse_args()
setup_logging()
numeric_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: {0!s}'.format(args.log_level))
# This catches the children loggers like cloaca.game
logging.getLogger('cloaca').setLevel(numeric_level)
loader = unittest.defaultTestLoader
if args.all:
sys.stderr.write('Running all tests.\n')
suites = loader.discover('.', pattern='*.py')
else:
if len(args.pattern) == 0:
sys.stderr.write('ERROR: No tests specified.\n\n')
parser.print_help(file=sys.stderr)
return
sys.stderr.write('Running all tests matching the patterns ('
+ ', '.join(args.pattern) + ')\n')
suites = loader.loadTestsFromNames(args.pattern)
test_suite = unittest.TestSuite(suites)
# TextTestRunner takes verbosity that can be 0 (quiet), 1 (default),
# or 2 (verbose). Quiet overrides verbose.
if args.quiet:
verbosity = 0
elif args.verbose:
verbosity = 2
else:
verbosity=1
test_runner = unittest.TextTestRunner(verbosity=verbosity).run(test_suite)
if __name__ == '__main__':
main()
|
opencobra/memote | src/memote/suite/cli/reports.py | Python | apache-2.0 | 14,140 | 0.000283 | # -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide commands for generating report files."""
from __future__ import absolute_import
import logging
import os
import sys
from builtins import open
from functool | s import partial
from multiprocessing import Pool, cpu_count
import click
import git
from libsbml import SBMLError
from sqlalchemy.exc import ArgumentError
import memote.suite.api as api
import memote.suite.cli.callbacks as callbacks
import memote.suite.results as managers
import memote.utils as utils
from memote.suite.cli import CONTEXT_SETTINGS
from memote.suite.reporting import ReportConfiguration
LOGGER = logging.getLogger(__name__)
@click.group()
@click.help_option("--help", "-h")
def repo | rt():
"""Generate one of three different types of reports."""
pass
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.argument(
"model", type=click.Path(exists=True, dir_okay=False), envvar="MEMOTE_MODEL"
)
@click.option(
"--filename",
type=click.Path(exists=False, writable=True),
default="index.html",
show_default=True,
help="Path for the HTML report output.",
)
@click.option(
"--pytest-args",
"-a",
callback=callbacks.validate_pytest_args,
help="Any additional arguments you want to pass to pytest. "
"Should be given as one continuous string.",
)
@click.option(
"--exclusive",
type=str,
multiple=True,
metavar="TEST",
help="The name of a test or test module to be run exclusively. "
"All other tests are skipped. This option can be used "
"multiple times and takes precedence over '--skip'.",
)
@click.option(
"--skip",
type=str,
multiple=True,
metavar="TEST",
help="The name of a test or test module to be skipped. This "
"option can be used multiple times.",
)
@click.option(
"--solver",
type=click.Choice(["cplex", "glpk", "gurobi", "glpk_exact"]),
default="glpk",
show_default=True,
help="Set the solver to be used.",
)
@click.option(
"--solver-timeout",
type=int,
default=10,
help="Timeout in seconds to set on the mathematical optimization solver.",
)
@click.option(
"--experimental",
type=click.Path(exists=True, dir_okay=False),
default=None,
callback=callbacks.validate_experimental,
help="Define additional tests using experimental data.",
)
@click.option(
"--custom-tests",
type=click.Path(exists=True, file_okay=False),
multiple=True,
help="A path to a directory containing custom test "
"modules. Please refer to the documentation for more "
"information on how to write custom tests. May be "
"specified multiple times.",
)
@click.option(
"--custom-config",
type=click.Path(exists=True, dir_okay=False),
multiple=True,
help="A path to a report configuration file that will be merged "
"into the default configuration. It's primary use is to "
"configure the placement and scoring of custom tests but "
"it can also alter the default behavior. Please refer to "
"the documentation for the expected YAML format used. This "
"option can be specified multiple times.",
)
def snapshot(
model,
filename,
pytest_args,
exclusive,
skip,
solver,
solver_timeout,
experimental,
custom_tests,
custom_config,
):
"""
Take a snapshot of a model's state and generate a report.
MODEL: Path to model file. Can also be supplied via the environment variable
MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
"""
model_obj, sbml_ver, notifications = api.validate_model(model)
if model_obj is None:
LOGGER.critical(
"The model could not be loaded due to the following SBML errors."
)
utils.stdout_notifications(notifications)
api.validation_report(model, notifications, filename)
sys.exit(1)
if not any(a.startswith("--tb") for a in pytest_args):
pytest_args = ["--tb", "no"] + pytest_args
# Add further directories to search for tests.
pytest_args.extend(custom_tests)
config = ReportConfiguration.load()
# Update the default test configuration with custom ones (if any).
for custom in custom_config:
config.merge(ReportConfiguration.load(custom))
model_obj.solver = solver
_, results = api.test_model(
model_obj,
sbml_version=sbml_ver,
results=True,
pytest_args=pytest_args,
skip=skip,
exclusive=exclusive,
experimental=experimental,
solver_timeout=solver_timeout,
)
with open(filename, "w", encoding="utf-8") as file_handle:
LOGGER.info("Writing snapshot report to '%s'.", filename)
file_handle.write(api.snapshot_report(results, config))
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.option(
    "--location",
    envvar="MEMOTE_LOCATION",
    help="Location of test results. Can either by a directory or an "
    "rfc1738 compatible database URL.",
)
@click.option(
    "--model",
    envvar="MEMOTE_MODEL",
    help="The path of the model file. Used to check if it was " "modified.",
)
@click.option(
    "--filename",
    type=click.Path(exists=False, writable=True),
    default="index.html",
    show_default=True,
    help="Path for the HTML report output.",
)
@click.option(
    "--deployment",
    default="gh-pages",
    show_default=True,
    help="Results will be read from and committed to the given " "branch.",
)
@click.option(
    "--custom-config",
    type=click.Path(exists=True, dir_okay=False),
    multiple=True,
    help="A path to a report configuration file that will be merged "
    "into the default configuration. It's primary use is to "
    "configure the placement and scoring of custom tests but "
    "it can also alter the default behavior. Please refer to "
    "the documentation for the expected YAML format used. This "
    "option can be specified multiple times.",
)
def history(location, model, filename, deployment, custom_config):
    """Generate a report over a model's git commit history.

    Reads per-commit results from ``location`` on the ``deployment``
    branch and writes the rendered HTML report to ``filename``.
    """
    # Abort early with a helpful message if git itself is missing.
    callbacks.git_installed()
    LOGGER.info("Initialising history report generation.")
    if location is None:
        raise click.BadParameter("No 'location' given or configured.")
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        LOGGER.critical(
            "The history report requires a git repository in order to check "
            "the model's commit history."
        )
        sys.exit(1)
    LOGGER.info(
        "Obtaining history of results from "
        "the deployment branch {}.".format(deployment)
    )
    # Results live on the deployment branch; switch to it before reading.
    repo.git.checkout(deployment)
    try:
        # Prefer the SQL-backed store; fall back to the plain file-based
        # manager when 'location' is not a valid database URL.
        manager = managers.SQLResultManager(repository=repo, location=location)
    except (AttributeError, ArgumentError):
        manager = managers.RepoResultManager(repository=repo, location=location)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    LOGGER.info("Tracing the commit history.")
    history = managers.HistoryManager(repository=repo, manager=manager)
    # Skip the deployment branch itself when walking the model's history.
    history.load_history(model, skip={deployment})
    LOGGER.info("Composing the history report.")
    report = api.history_report(history, config=config)
    with open(filename, "w", encoding="utf-8") as file_handle:
        file_handle.write(report)
def _test_diff(
model_an |
tzanetos/graphs | graphs/libs/ubigraph/UbiGraph-alpha-0.2.4-Linux64-Ubuntu-8.04/examples/Python/architecture.py | Python | mit | 689 | 0.044993 | import ubigraph, sys
#Omit
# One shared Ubigraph XML-RPC session for the whole demo; start from an
# empty scene so re-runs do not accumulate vertices.
U = ubigraph.Ubigraph()
U.clear()
U.defaultVertexStyle.set(fontsize=14)
# Central vertices of the architecture diagram: the layout engine and the
# XML-RPC server node, connected by a directed edge.
engine = U.newVertex(color="#ffff00",shape="dodecahedron",label="LayoutEngine")
server = U.newVertex(color="#8080ff",shape="cube",label="XMLRPC Server")
engine_server_edge = U.newEdge(server,engine,width=3,arrow=True,oriented=True)
def client():
    """Add one client stack to the diagram: Client App -> XMLRPC Client -> Server.

    Uses the module-level Ubigraph session ``U`` and the ``server`` vertex.
    """
    clientXVertex = U.newVertex(color="#8080ff", shape="cube", label="XMLRPC Client")
    # NOTE(review): the next line was corrupted by a data-extraction artifact
    # ("U.newVer | tex"); reconstructed as U.newVertex.
    clientVertex = U.newVertex(color="#ff0080", shape="cube", label="Client App")
    U.newEdge(clientVertex, clientXVertex, arrow=True, oriented=True)
    U.newEdge(clientXVertex, server, arrow=True, oriented=True)
# Spawn three client stacks attached to the server vertex.
# NOTE(review): the loop body was corrupted by a data-extraction artifact
# ("| client()") and a stray trailing "|"; reconstructed.
for i in range(0, 3):
    client()

# Keep the process (and hence the rendered graph) alive until Enter.
sys.stdin.readline()
makelove/OpenCV-Python-Tutorial | ch200_Extra_modules/aruco/Camera Calibration using ChArUco and Python/calibrateCamera2.py | Python | mit | 2,398 | 0 | # -*- coding: utf-8 -*-
# @Time : 2017/7/27 18:04
# @Author : play4fun
# @File : calibrateCamera2.py
# @Software: PyCharm
"""
calibrateCamera2.py:
"""
import cv2
import numpy as np
def draw_axis(img, charuco_corners, charuco_ids, board):
    """Overlay the ChArUco board's coordinate axes onto ``img``.

    Camera intrinsics come from the pre-computed ``calib.npz`` file.
    """
    calibration = np.load("./calib.npz")  # camera was calibrated beforehand
    mtx, dist, _, _ = (calibration[key]
                       for key in ('mtx', 'dist', 'rvecs', 'tvecs'))
    ok, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(
        charuco_corners, charuco_ids, board, mtx, dist)
    # Draw only when pose estimation reports exactly True.
    if ok is True:
        cv2.aruco.drawAxis(img, mtx, dist, rvec, tvec, 0.1)
def get_image(camera):
    """Grab and return a single frame from ``camera``, dropping the status flag."""
    _, frame = camera.read()
    return frame
def make_grayscale(img):
    """Return a grayscale copy of the BGR image ``img``."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def main():
    """Continuously grab frames, detect ChArUco diamonds and draw pose axes.

    NOTE(review): this function was heavily corrupted in extraction
    (duplicated continuation lines and "|" separator artifacts); the
    structure below is a best-effort reconstruction.  In addition,
    ``aruco_dict``, ``mtx`` and ``dist`` are never defined in this module,
    so the loop cannot run as-is -- confirm against the original tutorial.
    """
    camera = cv2.VideoCapture(0)
    img = get_image(camera)
    while True:
        cv2.imshow('calibration', img)
        cv2.waitKey(10)
        img = get_image(camera)
        gray = make_grayscale(img)

        # aruco_dict is undefined here -- TODO confirm source of dictionary.
        corners, ids, rejected = cv2.aruco.detectMarkers(gray, aruco_dict)
        cv2.aruco.drawDetectedMarkers(img, corners, ids)
        if ids is not None and corners is not None \
                and len(ids) > 0 and len(ids) == len(corners):
            # square/marker length ratio 0.05/0.03; mtx/dist undefined here.
            diamond_corners, diamond_ids = \
                cv2.aruco.detectCharucoDiamond(img, corners, ids,
                                               0.05 / 0.03, cameraMatrix=mtx,
                                               distCoeffs=dist)
            cv2.aruco.drawDetectedDiamonds(img, diamond_corners, diamond_ids)
            '''if diamond_ids is not None and len(diamond_ids) >= 4:
                break'''
            board = cv2.aruco.CharucoBoard_create(9, 6, 0.05, 0.03,
                                                  aruco_dict)
            if diamond_corners is not None and diamond_ids is not None \
                    and len(diamond_corners) == len(diamond_ids):
                count, char_corners, char_ids = \
                    cv2.aruco.interpolateCornersCharuco(diamond_corners,
                                                        diamond_ids, gray,
                                                        board)
                if count >= 3:
                    draw_axis(img, char_corners, char_ids, board)
if __name__ == '__main__':
main()
|
henla464/WiRoc-Python-2 | testOLED.py | Python | gpl-3.0 | 6,908 | 0.001158 | # Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
RST = None
# Raspberry Pi pin configuration:
#RST = 24
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# Beaglebone Black pin configuration:
# RST = 'P9_12'
# Note the following are only used with SPI:
# DC = 'P9_15'
# SPI_PORT = 1
# SPI_DEVICE = 0
# 128x32 display with hardware I2C:
#disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
# 128x64 display with hardware I2C:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Note you can change the I2C address by passing an i2c_address parameter like:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)
# Alternatively you can specify an explicit I2C bus number, for example
# with the 128x32 display you would use:
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)
# 128x32 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# 128x64 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Load default font.
#font = ImageFont.load_default()
thinFont = ImageFont.truetype('GeosansLight.ttf', 10)
draw.text((0, 0), 'CH', font=thinFont, fill=255)
thinFont2 = ImageFont.truetype('GeosansLight.ttf', 14)
#draw.text((40, 0), 'SENDER', font=thinFont2, fill=255)
#draw.text((40, 10), 'RECEIVER', font=thinFont2, fill=255)
draw.text((40, 16), 'REPEATER', font=thinFont2, fill=255)
draw.text((40,-3), '1758', font=thinFont2, fill=255)
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
boldFont = ImageFont.truetype('theboldfont.ttf', 44)
x = 14
draw.text((x, 0), '3', font=boldFont, fill=255)
#draw.text((x, top+20), 'World!', font=font, fill=255)
x=104
top = 0
draw.rectangle((x, top, x+20, top+10), outline=255, fill=0)
draw.rectangle((x+20, top+3, x+23, top+7), outline=255, fill=0)
percent = 90
width=int((percent-5) / 5)
draw.rectangle((x+1, top+1, x+width, top+9), outline=255, fill=255)
x=80
draw.arc([(x, top), (x+16, top+16)], 210, 330, fill=255)
draw.arc([(x+3, top+3), (x+13, top+13)], 210, 335, fill=255)
draw.ellipse((x+7,top+7, x+9, top+9), outline=255, fill=255)
draw.line((x+14,top+8,x+14,top+9), fill=255)
draw.line((x+16,top+6,x+16,top+9), fill=255)
draw.line((x+18,top+4,x+18,top+9), fill=255)
draw.line((x+20,top+2,x+20,top+9), fill=255)
# Draw an ellipse.
#draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
#x += shape_width+padding
# Draw a rectangle.
#draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
#x += shape_width+padding
# Draw a triangle.
#draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
#x += shape_width+padding
# Draw an X.
#draw.line((x, bottom, x+shape_width, top), fill=255)
#draw.line((x, top, x+shape_width, bottom), fill=255)
#x += shape_width+padding
# Display image.
disp.image(image)
disp.display()
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
draw.text((0, 0), 'CH', font=thinFont, fill=255)
thinFont2 = ImageFont.truetype('GeosansLight.ttf', 14)
#draw.text((40, 0), 'SENDER', font=thinFont2, fill=255)
#draw.text((40, 10), 'RECEIVER', font=thinFont2, fill=255)
draw.text((40, 16), 'REPEATER', font=thinFont2, fill=255)
draw.text((40,-3), '1758', font=thinFont2, fill=255)
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
boldFont = ImageFont.truetype('theboldfont.ttf', 44)
x = 14
draw.text((x, 0), '3', font=boldFont, fill=255)
#draw.text((x, top+20), 'World!', font=font, fill=255)
x=104
top = 0
draw.rectangle((x, top, x+20, top+10), outline=255, fill=0)
draw.rectangle((x+20, top+3, x+23, top+7), outline=255, fill=0)
percent = 90
width=int((percent-5) / 5)
draw.rectangle((x+1, top+1, x+width, top+9), outline=255, fill=255)
x=80
draw.arc([(x, top), (x+16, top+16)], 210, 330, fill=255)
draw.arc([(x+3, top+3), (x+13, top+13)], 210, 335, fill=255)
draw.ellipse((x+7,top+7, x+9, top+9), outline=255, fill=255)
draw.line((x+14,top+8,x+14,top+9), fill=255)
draw.line((x+16,top+6,x+16,top+9), fill=255)
draw.line((x+18,top+4,x+18,top+9), fill=255)
draw.line((x+20,top+2,x+20,top+9), fill=255)
# Display image.
disp.image(image)
disp.display()
"""
|
elijh/bitmask_client | src/leap/bitmask/services/eip/eipconfig.py | Python | gpl-3.0 | 10,587 | 0.000094 | # -*- coding: utf-8 -*-
# eipconfig.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Provider configuration
"""
import logging
import os
import re
import time
import ipaddr
from leap.bitmask.config.providerconfig import ProviderConfig
from leap.bitmask.services import ServiceConfig
from leap.bitmask.services.eip.eipspec import get_schema
from leap.bitmask.util import get_path_prefix
from leap.common.check import leap_assert, leap_assert_type
logger = logging.getLogger(__name__)
def get_eipconfig_path(domain, relative=True):
    """
    Returns relative or absolute path for EIP config.

    :param domain: the domain to which this eipconfig belongs to.
    :type domain: str
    :param relative: defines whether the path should be relative or absolute.
    :type relative: bool
    :returns: the path
    :rtype: str
    """
    leap_assert(domain is not None, "get_eipconfig_path: We need a domain")

    rel_path = os.path.join("leap", "providers", domain, "eip-service.json")
    if relative:
        return rel_path
    return os.path.join(get_path_prefix(), rel_path)
def load_eipconfig_if_needed(provider_config, eip_config, domain):
    """
    Utility function to prime a eip_config object from a loaded
    provider_config and the chosen provider domain.

    :param provider_config: a loaded instance of ProviderConfig
    :type provider_config: ProviderConfig
    :param eip_config: the eipconfig object to be primed.
    :type eip_config: EIPConfig
    :param domain: the chosen provider domain
    :type domain: str
    :returns: Whether the eip_config object has been succesfully loaded
    :rtype: bool
    """
    # NOTE(review): two lines below were corrupted by extraction artifacts
    # ("load | ed" and "eip_conf | ig"); reconstructed.
    loaded = eip_config.loaded()
    if not loaded:
        # Load from disk, matching the API version of the provider.
        eip_config_path = get_eipconfig_path(domain)
        api_version = provider_config.get_api_version()
        eip_config.set_api_version(api_version)
        loaded = eip_config.load(eip_config_path)
    return loaded
class VPNGatewaySelector(object):
    """
    VPN Gateway selector.

    Orders a provider's gateways by how close their timezone is to the
    local one, as a cheap proxy for geographic proximity.
    """

    # http://www.timeanddate.com/time/map/
    # UTC+13/+14 fall outside the -11..+12 ring used below, so they are
    # folded onto their wrapped-around equivalents.
    equivalent_timezones = {13: -11, 14: -10}

    def __init__(self, eipconfig, tz_offset=None):
        '''
        Constructor for VPNGatewaySelector.

        :param eipconfig: a valid EIP Configuration.
        :type eipconfig: EIPConfig
        :param tz_offset: use this offset as a local distance to GMT.
        :type tz_offset: int
        '''
        leap_assert_type(eipconfig, EIPConfig)

        # NOTE(review): this first assignment is redundant -- it is always
        # overwritten a few lines below.
        self._local_offset = tz_offset
        if tz_offset is None:
            tz_offset = self._get_local_offset()

        if tz_offset in self.equivalent_timezones:
            tz_offset = self.equivalent_timezones[tz_offset]

        self._local_offset = tz_offset

        self._eipconfig = eipconfig

    def get_gateways_list(self):
        """
        Returns the existing gateways, sorted by timezone proximity.

        :rtype: list of tuples (location, ip)
                (str, IPv4Address or IPv6Address object)
        """
        gateways_timezones = []
        locations = self._eipconfig.get_locations()
        gateways = self._eipconfig.get_gateways()
        for idx, gateway in enumerate(gateways):
            gateway_location = gateway.get('location')
            gateway_distance = 99  # if hasn't location -> should go last

            if gateway_location is not None:
                timezone = locations[gateway['location']]['timezone']
                # Prefer the human-readable location name when available.
                gateway_name = locations[gateway['location']].get('name', None)
                if gateway_name is not None:
                    gateway_location = gateway_name

                gw_offset = int(timezone)
                if gw_offset in self.equivalent_timezones:
                    gw_offset = self.equivalent_timezones[gw_offset]

                gateway_distance = self._get_timezone_distance(gw_offset)

            ip = self._eipconfig.get_gateway_ip(idx)
            gateways_timezones.append((ip, gateway_distance, gateway_location))

        # Sort by timezone distance (second tuple element), nearest first.
        gateways_timezones = sorted(gateways_timezones, key=lambda gw: gw[1])

        gateways = []
        for ip, distance, location in gateways_timezones:
            gateways.append((location, ip))

        return gateways

    def get_gateways(self):
        """
        Returns the 4 best gateways, sorted by timezone proximity.

        :rtype: list of IPv4Address or IPv6Address object.
        """
        gateways = [ip for location, ip in self.get_gateways_list()][:4]
        return gateways

    def _get_timezone_distance(self, offset):
        '''
        Returns the distance between the local timezone and
        the one with offset 'offset'.

        :param offset: the distance of a timezone to GMT.
        :type offset: int
        :returns: distance between local offset and param offset.
        :rtype: int
        '''
        timezones = range(-11, 13)
        tz1 = offset
        tz2 = self._local_offset
        distance = abs(timezones.index(tz1) - timezones.index(tz2))
        if distance > 12:
            # Going the other way around the globe is shorter.
            if tz1 < 0:
                distance = timezones.index(tz1) + timezones[::-1].index(tz2)
            else:
                distance = timezones[::-1].index(tz1) + timezones.index(tz2)

        return distance

    def _get_local_offset(self):
        '''
        Returns the distance between GMT and the local timezone.

        :rtype: int
        '''
        local_offset = time.timezone
        if time.daylight:
            local_offset = time.altzone

        # NOTE(review): relies on Python 2 integer division; under Python 3
        # this yields a float -- confirm the intended runtime.
        return -local_offset / 3600
class EIPConfig(ServiceConfig):
"""
Provider configuration abstraction class
"""
_service_name = "eip"
OPENVPN_ALLOWED_KEYS = ("auth", "cipher", "tls-cipher")
OPENVPN_CIPHERS_REGEX = re.compile("[A-Z0-9\-]+")
def __init__(self):
ServiceConfig.__init__(self)
self._api_version = None
def _get_schema(self):
"""
Returns the schema corresponding to the version given.
:rtype: dict or None if the version is not supported.
"""
return get_schema(self._api_version)
def get_clusters(self):
# TODO: create an abstraction for clusters
return self._safe_get_value("clusters")
def get_gateways(self):
# TODO: create an abstraction for gateways
return self._safe_get_value("gateways")
def get_locations(self):
'''
Returns a list of locations
:rtype: dict
'''
return self._safe_get_value("locations")
def get_openvpn_configuration(self):
"""
Returns a dictionary containing the openvpn configuration
parameters.
These are sanitized with alphanumeric whitelist.
:returns: openvpn configuration dict
:rtype: C{dict}
"""
ovpncfg = self._safe_get_value("openvpn_configuration")
config = {}
for key, value in ovpncfg.items():
if key in self.OPENVPN_ALLOWED_KEYS and value is not None:
sanitized_val = self.OPENVPN_CIPHERS_REGEX.findall(value)
if len(sanitized_val) != 0:
_val = sanitized_val[0]
config[str(key)] = str(_val)
return config
def get_serial(self):
return self._safe_get_value("serial")
def get_version(self):
return self._safe_get_value("version")
def get_gateway_ip(self, index=0):
"""
Returns the ip of the gateway.
:rtype: An IPv4Address or IPv6Address object.
"""
gateways = self.get_gateways()
leap_assert(len(gateways) > 0, |
iwm911/plaso | plaso/parsers/plist_plugins/airport_test.py | Python | apache-2.0 | 2,576 | 0.001941 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the airport plist plugin."""
impor | t unittest
# pylint: disable=unused-import
from plaso.formatters import plist as plist_formatter
from plaso.lib import event
from plaso.parsers import plist
from plaso.parsers.plist_plugins import airport
from plaso.parsers.plist_plugins import test_lib
class AirportPluginTest(test_lib.PlistPluginTestCase):
  """Tests for the airport plist plugin."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._plugin = airport.AirportPlugin(None)
    self._parser = plist.PlistParser(event.PreprocessObject(), None)

  def testProcess(self):
    """Tests the Process function."""
    test_file = self._GetTestFilePath(['com.apple.airport.preferences.plist'])
    plist_name = 'com.apple.airport.preferences.plist'
    events = self._ParsePlistFileWithPlugin(
        self._parser, self._plugin, test_file, plist_name)
    event_objects = self._GetEventObjects(events)

    # The fixture contains four remembered networks, one event each.
    self.assertEquals(len(event_objects), 4)

    timestamps = []
    for event_object in event_objects:
      timestamps.append(event_object.timestamp)

    # Expected connection timestamps in microseconds since epoch.
    expected_timestamps = frozenset([
        1375144166000000, 1386874984000000, 1386949546000000,
        1386950747000000])
    self.assertTrue(set(timestamps) == expected_timestamps)

    event_object = event_objects[0]
    self.assertEqual(event_object.key, u'item')
    self.assertEqual(event_object.root, u'/RememberedNetworks')

    expected_desc = (
        u'[WiFi] Connected to network: <europa> using security '
        u'WPA/WPA2 Personal')
    self.assertEqual(event_object.desc, expected_desc)
    # The short message is the long one truncated to 77 chars plus '...'.
    expected_string = u'/RememberedNetworks/item {}'.format(expected_desc)
    expected_short = expected_string[:77] + u'...'
    self._TestGetMessageStrings(
        event_object, expected_string, expected_short)
event_object, expected_string, expected_short)
if __name__ == '__main__':
unittest.main()
|
Natalia-28028/kpk2016 | task_dm4.py | Python | gpl-3.0 | 328 | 0.006098 | from drawman import *
from time | import sleep
scale = 100
def f(x):
    """Parabola y = x**2 sampled by the plotting loops below."""
    return pow(x, 2)
# Forward sweep: trace y = f(x) from x = 0 up to x = 3 in 0.1 steps.
x = 0
y = 0
dx = 0.1
while x < 3:
    dy = f(x + dx) - y  # vertical delta between consecutive samples
    x = x + dx
    y = f(x)
    # shift() presumably moves the pen by a scaled relative offset
    # (drawman API) -- TODO confirm.
    shift(dx*scale, dy*scale)
# Backward sweep: trace the left half of the parabola from x = 0 to -3.
x = 0
y = 0
dx = -0.1
while x > -3:
    dy = f(x + dx) - y
    x = x + dx
    y = f(x)
    # NOTE(review): reconstructed from corrupted "shift | (..." artifact;
    # a stray trailing "|" line was also removed.
    shift(dx*scale, dy*scale)
sleep(1)
cbrichford/async-mock | async_mock/coroutine.py | Python | bsd-3-clause | 3,037 | 0.001317 | import asyncio
from unittest.mock import MagicMock
def SimpleCoroutineMock(f=lambda *args, **kwargs: None):
    """Return a ready-made, non-blocking coroutine mock delegating to *f*."""
    return CoroutineMockBuilder().addDelegate(f).build().mock()
class CoroutineMock(object):
    """Mock whose calls are coroutines, optionally blocking on an event.

    Each call sets the "starting" event, optionally waits on ``block``
    until :meth:`unblock` is called, then produces the next value from
    ``returnSequence`` (cycling when exhausted) and sets the "ending"
    event.
    """

    # Handy for debugging failing tests in the debugger.
    __blocking_dict = {}

    def __init__(self, returnSequence, block:asyncio.Event):
        self.__startingEvent = asyncio.Event()
        self.__endingEvent = asyncio.Event()
        self.__returnSequence = tuple(returnSequence)
        if (len(self.__returnSequence) < 1):
            # Guarantee at least one producer so indexing never fails.
            self.__returnSequence = (lambda *args, **kwargs: None, )
        self.__returnSequenceLen = len(self.__returnSequence)
        self.__block = block
        self.__mock = self.__createMock()
        # It's easier to find a dictionary that is an instance variable than
        # one that is a class static, so just make an instance variable that
        # references the shared dictionary.
        self.__blocking_dict = CoroutineMock.__blocking_dict

    def __createMock(self):
        """Build the MagicMock wrapping the internal coroutine."""
        returnIndex = 0

        async def cr(*args, **kwargs):
            nonlocal returnIndex
            try:
                self.__endingEvent.clear()
                self.__startingEvent.set()
                if (self.__block is not None):
                    # Register while blocked so a debugger can find us.
                    self.__blocking_dict[id(self)] = self
                    try:
                        await self.__block.wait()
                    finally:
                        del self.__blocking_dict[id(self)]
                        self.__block.clear()
                returnFunc = self.__returnSequence[returnIndex % self.__returnSequenceLen]
                returnIndex += 1
                return returnFunc(*args, **kwargs)
            finally:
                # NOTE(review): this line was corrupted by an extraction
                # artifact ("| self.__startingEvent.clear()"); reconstructed.
                self.__startingEvent.clear()
                self.__endingEvent.set()

        return MagicMock(wraps=cr)

    def start(self):
        """Event set while a call is entering (cleared on completion)."""
        return self.__startingEvent

    def end(self):
        """Event set once a call has completed."""
        return self.__endingEvent

    def unblock(self):
        """Release a call currently waiting on the blocking event."""
        # NOTE(review): reconstructed from corrupted "| self.__block.set()".
        self.__block.set()

    def mock(self):
        """Return the MagicMock recording call arguments and counts."""
        return self.__mock

    async def waitForSingleCall(self):
        """Wait for one call to start, release it and wait for completion."""
        await self.start().wait()
        self.unblock()
        await self.end().wait()
class CoroutineMockBuilder(object):
    """Fluent builder assembling the return plan for a CoroutineMock."""

    def __init__(self):
        self.__block = None
        self.__returnSequence = []

    def blocks(self):
        """Make the mock block each call on a fresh event."""
        return self.blocksOn(asyncio.Event())

    def blocksOn(self, event:asyncio.Event):
        """Make the mock block each call on the given event."""
        self.__block = event
        return self

    def exception(self, e, repeats=1):
        """Queue *repeats* calls that raise ``e``."""
        def raiser(*args, **kwargs):
            raise e
        self.__returnSequence += [raiser] * repeats
        return self

    def returns(self, v, repeats=1):
        """Queue *repeats* calls that return ``v``."""
        self.__returnSequence += [lambda *args, **kwargs: v] * repeats
        return self

    def addDelegate(self, f, repeats=1):
        """Queue *repeats* calls that delegate to ``f``."""
        self.__returnSequence += [f] * repeats
        return self

    def build(self):
        """Create the CoroutineMock from the accumulated plan."""
        return CoroutineMock(self.__returnSequence, self.__block)
|
tijptjik/thegodsproject | plugins/i18n_subsites/i18n_subsites.py | Python | mit | 17,012 | 0.00047 | """i18n_subsites plugin creates i18n-ized subsites of the default site
This plugin is designed for Pelican 3.4 and later
"""
import os
import six
import logging
import posixpath
from copy import copy
from itertools import chain
from operator import attrgetter
from collections import OrderedDict
from contextlib import contextmanager
from six.moves.urllib.parse import urlparse
import gettext
import locale
from pelican import signals
from pelican.generators import ArticlesGenerator, PagesGenerator
from pelican.settings import configure_settings
from pelican.contents import Draft
# Global vars
_MAIN_SETTINGS = None # settings dict of the main Pelican instance
_MAIN_LANG = None # lang of the main Pelican instance
_MAIN_SITEURL = None # siteurl of the main Pelican instance
_MAIN_STATIC_FILES = None # list of Static instances the main Pelican instance
_SUBSITE_QUEUE = {} # map: lang -> settings overrides
_SITE_DB = OrderedDict() # OrderedDict: lang -> siteurl
_SITES_RELPATH_DB = {} # map: (lang, base_lang) -> relpath
# map: generator -> list of removed contents that need interlinking
_GENERATOR_DB = {}
_NATIVE_CONTENT_URL_DB = {} # map: source_path -> content in its native lang
_LOGGER = logging.getLogger(__name__)
@contextmanager
def temporary_locale(temp_locale=None):
    '''Enable code to run in a context with a temporary locale

    Resets the locale back when exiting the context, even when the
    managed block raises.
    Can set a temporary locale if provided

    :param temp_locale: locale string to activate inside the context,
        e.g. ``'en_US.UTF-8'``; ``None`` only saves and restores.
    '''
    orig_locale = locale.setlocale(locale.LC_ALL)
    if temp_locale is not None:
        locale.setlocale(locale.LC_ALL, temp_locale)
    try:
        yield
    finally:
        # Bug fix: the original skipped restoration when the managed block
        # raised, leaking the temporary locale into the whole process.
        locale.setlocale(locale.LC_ALL, orig_locale)
def initialize_dbs(settings):
    '''Initialize internal DBs using the Pelican settings dict

    This clears the DBs for e.g. autoreload mode to work

    :param settings: the main Pelican instance's settings dict
    '''
    global _MAIN_SETTINGS, _MAIN_SITEURL, _MAIN_LANG, _SUBSITE_QUEUE
    _MAIN_SETTINGS = settings
    _MAIN_LANG = settings['DEFAULT_LANG']
    _MAIN_SITEURL = settings['SITEURL']
    # Copy so later override mutations don't touch the user's settings.
    _SUBSITE_QUEUE = settings.get('I18N_SUBSITES', {}).copy()
    prepare_site_db_and_overrides()
    # clear databases in case of autoreload mode
    _SITES_RELPATH_DB.clear()
    _NATIVE_CONTENT_URL_DB.clear()
    _GENERATOR_DB.clear()
def prepare_site_db_and_overrides():
    '''Prepare overrides and create _SITE_DB

    _SITE_DB.keys() need to be ready for filter_translations
    '''
    _SITE_DB.clear()
    _SITE_DB[_MAIN_LANG] = _MAIN_SITEURL
    # make sure it works for both root-relative and absolute
    main_siteurl = '/' if _MAIN_SITEURL == '' else _MAIN_SITEURL
    for lang, overrides in _SUBSITE_QUEUE.items():
        # Each subsite defaults to living under <main siteurl>/<lang>.
        if 'SITEURL' not in overrides:
            overrides['SITEURL'] = posixpath.join(main_siteurl, lang)
        _SITE_DB[lang] = overrides['SITEURL']
        # default subsite hierarchy: output and cache nested per language
        if 'OUTPUT_PATH' not in overrides:
            overrides['OUTPUT_PATH'] = os.path.join(
                _MAIN_SETTINGS['OUTPUT_PATH'], lang)
        if 'CACHE_PATH' not in overrides:
            overrides['CACHE_PATH'] = os.path.join(
                _MAIN_SETTINGS['CACHE_PATH'], lang)
        if 'STATIC_PATHS' not in overrides:
            overrides['STATIC_PATHS'] = []
        # Unless the subsite brings its own theme, reuse the main site's
        # theme static dir via a relative path (nothing is copied twice).
        if ('THEME' not in overrides and 'THEME_STATIC_DIR' not in overrides and
                'THEME_STATIC_PATHS' not in overrides):
            relpath = relpath_to_site(lang, _MAIN_LANG)
            overrides['THEME_STATIC_DIR'] = posixpath.join(
                relpath, _MAIN_SETTINGS['THEME_STATIC_DIR'])
            overrides['THEME_STATIC_PATHS'] = []
        # to change what is perceived as translations
        overrides['DEFAULT_LANG'] = lang
def subscribe_filter_to_signals(settings):
    '''Subscribe content filter to requested signals

    The signals are taken from the I18N_FILTER_SIGNALS setting.
    '''
    requested_signals = settings.get('I18N_FILTER_SIGNALS', [])
    for requested in requested_signals:
        requested.connect(filter_contents_translations)
def initialize_plugin(pelican_obj):
    '''Initialize plugin variables and Pelican settings

    Only runs the initialization once -- subsequent (subsite) Pelican
    instances see _MAIN_SETTINGS already populated.
    '''
    if _MAIN_SETTINGS is None:
        initialize_dbs(pelican_obj.settings)
        subscribe_filter_to_signals(pelican_obj.settings)
def get_site_path(url):
    '''Get the path component of an url, excludes siteurl

    also normalizes '' to '/' for relpath to work,
    otherwise it could be interpreted as a relative filesystem path
    '''
    return urlparse(url).path or '/'
def relpath_to_site(lang, target_lang):
    '''Get relative path from siteurl of lang to siteurl of base_lang

    the output is cached in _SITES_RELPATH_DB

    :param lang: language of the site to start from
    :param target_lang: language of the site to point to
    :returns: POSIX relative path between the two site roots
    '''
    path = _SITES_RELPATH_DB.get((lang, target_lang), None)
    if path is None:
        # Unknown languages fall back to the main site's URL.
        siteurl = _SITE_DB.get(lang, _MAIN_SITEURL)
        target_siteurl = _SITE_DB.get(target_lang, _MAIN_SITEURL)
        path = posixpath.relpath(get_site_path(target_siteurl),
                                 get_site_path(siteurl))
        _SITES_RELPATH_DB[(lang, target_lang)] = path
    return path
def save_generator(generator):
    '''Save the generator for later use

    initialize the removed content list

    The list collects contents removed from this generator that still
    need interlinking.
    '''
    _GENERATOR_DB[generator] = []
def article2draft(article):
    '''Transform an Article to Draft

    Builds a new Draft from the article's internals and marks it with
    'draft' status; the original article is left untouched.
    '''
    draft = Draft(article._content, article.metadata, article.settings,
                  article.source_path, article._context)
    draft.status = 'draft'
    return draft
def page2hidden_page(page):
    '''Transform a Page to a hidden Page

    Mutates the given page in place (unlike article2draft, no copy).
    '''
    page.status = 'hidden'
    return page
class GeneratorInspector(object):
    '''Inspector of generator instances

    Maps known generator classes to the attribute names that hold their
    contents/translations and to the policy controlling untranslated
    content.
    '''

    generators_info = {
        ArticlesGenerator: {
            'translations_lists': ['translations', 'drafts_translations'],
            'contents_lists': [('articles', 'drafts')],
            'hiding_func': article2draft,
            'policy': 'I18N_UNTRANSLATED_ARTICLES',
        },
        PagesGenerator: {
            'translations_lists': ['translations', 'hidden_translations'],
            'contents_lists': [('pages', 'hidden_pages')],
            'hiding_func': page2hidden_page,
            'policy': 'I18N_UNTRANSLATED_PAGES',
        },
    }

    def __init__(self, generator):
        '''Identify the best known class of the generator instance

        The class info is looked up along the generator's MRO so
        subclasses of the known generators are handled too; unknown
        classes get an empty info dict.
        '''
        self.generator = generator
        # User-provided I18N_GENERATORS_INFO entries extend/override ours.
        self.generators_info.update(generator.settings.get(
            'I18N_GENERATORS_INFO', {}))

        for cls in generator.__class__.__mro__:
            if cls in self.generators_info:
                self.info = self.generators_info[cls]
                break
        else:
            self.info = {}

    def translations_lists(self):
        '''Iterator over lists of content translations'''
        return (getattr(self.generator, name) for name in
                self.info.get('translations_lists', []))

    def contents_list_pairs(self):
        '''Iterator over pairs of normal and hidden contents'''
        return (tuple(getattr(self.generator, name) for name in names)
                for names in self.info.get('contents_lists', []))

    def hiding_function(self):
        '''Function for transforming content to a hidden version'''
        # Identity fallback when no hiding function is configured.
        hiding_func = self.info.get('hiding_func', lambda x: x)
        return hiding_func

    def untranslated_policy(self, default):
        '''Get the policy for untranslated content'''
        return self.generator.settings.get(self.info.get('policy', None),
                                           default)

    def all_contents(self):
        '''Iterator over all contents'''
        translations_iterator = chain(*self.translations_lists())
        return chain(translations_iterator,
                     *(pair[i] for pair in self.contents_list_pairs()
                       for i in (0, 1)))
def filter_contents_translations(generator):
'''Filter the content and translations lists of a generator
Filters out
1) translations which will be generated in a different site
2) content that is not in the language of the currently
generated site but in that of a different site, content in a
language which has no site is generated always. The filtering
method bay b |
angus-ai/angus-service-facedetection | angus/services/__init__.py | Python | apache-2.0 | 915 | 0 | # -*- coding: utf-8 -*- |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable l | aw or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Service for face detection.
"""
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
odevsp/ckanext-api_basic_functions | setup.py | Python | agpl-3.0 | 3,241 | 0.001543 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open  # To use a consistent encoding
from os import path

# Directory containing this setup.py, so README.rst is found regardless of
# the current working directory.
here = path.abspath(path.dirname(__file__))

# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='''ckanext-api_basic_functions''',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # http://packaging.python.org/en/latest/tutorial.html#version
    version='0.0.1',

    description='''API with functions to execute SQL queries or re index solr.''',
    long_description=long_description,

    # The project's main homepage.
    url='https://github.com/odevsp/ckanext-api_basic_functions',

    # Author details
    author='''odevsp''',
    author_email='''jroigfer@everis.com''',

    # Choose your license
    license='AGPL',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],

    # What does your project relate to?
    keywords='''CKAN extension solr reindex query database''',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),

    # List run-time dependencies here. These will be installed by pip when your
    # project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
    install_requires=[],

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    include_package_data=True,
    package_data={
    },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages.
    # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points='''
[ckan.plugins]
api_basic_functions=ckanext.api_basic_functions.plugin:Api_Basic_FunctionsPlugin
''',
)
|
McGillX/edx_data_research | edx_data_research/reporting/basic/forum_body_extraction_for_word_cloud.py | Python | mit | 1,989 | 0.004525 | '''
This module extracts all of the comments and comment threads from the forum of
a given course. Using NLTK, all of the comments and comment threads will be
parsed into a list of words which will be then used to create a word cloud
using http://www.wordle.net/create
Usage:
python forum_body_extraction_for_word_cloud.py > word_cloud.txt
The above command would get the top 1000 words and print those words the
number of times they were used and the result is outputted to a text file,
e.g. word_cloud.txt. User can copy the text in word_cloud.txt and paste in
http://www.wordle.net/create to generate the word cloud
'''
import nltk
from nltk.corpus import stopwords
import json
import heapq
from common.base_edx import EdXConnection
from common.generate_csv_report import CSV
connection = EdXConnection('forum')
collection = connection.get_access_to_collection()
cursor = collection['forum'].find()
body_data = [item['body'] for item in cursor]
# Words that are not required to be considered in the word cloud can be added
# to the list ignore_words. If user would like to pass this list via command
# line, the user can do the following:
# import sys
# ignore_word = sys.argv[1]
# where sys.argv[1] is of the format ['http', 'https'] and usage becomes:
# python forum_body_extraction_for_word_cloud.py list_of_ignore_words > word_cloud.txt
ignore_words = [ | 'http', 'https | ']
tokens = []
for item in body_data:
words = []
for word in nltk.word_tokenize(item):
clean_word = word.lower().encode('utf-8').strip('\'\"\-,.:;!?()[]{}=#*_$/%+&<>')
if clean_word and clean_word not in stopwords.words('english') and len(clean_word) > 2 and '\'' not in clean_word and clean_word not in ignore_words:
words.append(clean_word)
tokens.extend(words)
fd = nltk.FreqDist(tokens)
top_1000 = heapq.nlargest(1000, fd, key=fd.get)
top_1000_list = []
for item in top_1000:
top_1000_list.extend([str(item)] * fd[item])
print ' '.join(top_1000_list)
|
SachinKonan/Windows-RPI-Vision-Framework | HttpServer/check.py | Python | mit | 95 | 0.010526 | from urllib.request import urlopen
# Liveness probe: open the MJPEG camera stream served locally and print the
# resulting response object to confirm the HTTP server is reachable.
html = urlopen("http://127.0.0.1:9090/cam.mjpg")
print(html)
Fritzip/AGGP | src/main.py | Python | gpl-2.0 | 997 | 0.016048 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Dependancies : networkx, numpy, graphviz, matplotlib
## python main.py -h
## usage: main.py [options]
## Biological Graph Generator
## optional arguments:
##   -h, --help            show this help message and exit
## -p, --param Ask for every parameters of the simulation
## -v, --verbose
## -q, --quiet
## --no-progress Disable the progress bar
## -f FREQ, --freq FREQ Frequency of displaying informations
## -s, --no-save Do not save and plot individuals
##   -t, --stat            Plot final stats graphs
## -d, --delete Delete all output (files, graphs and pictures) from
## previous run
from population import *
####################################################################
# Main
####################################################################
if __name__ == "__main__":
    # Entry point: build a Population and run the genetic algorithm.
    # Command-line options listed in the module docstring are presumably
    # parsed inside the population module -- TODO confirm.
    a = Population()
    a.genetic_algo()
|
Paytokens/paytokensd | lib/callback.py | Python | mit | 6,281 | 0.00462 | #! /usr/bin/python3
"""Callback a callable asset."""
import struct
import decimal
D = decimal.Decimal
from . import (util, config, exceptions, litecoin, util)
from . import order
FORMAT = '>dQ'
LENGTH = 8 + 8
ID = 21
def validate (db, source, fraction, asset, block_time, block_index, parse):
    """Check whether *source* may call back *fraction* of *asset*.

    Returns a tuple ``(call_price, callback_total, outputs, problems)``:
    the per-unit call price, the total quantity to be called back, one
    dict per holder describing how much is taken from each address, and a
    list of human-readable problems (empty when the callback is valid).
    On early failures the first three elements are ``None``.

    When *parse* is true (transaction is being applied, not composed),
    escrowed funds are released first by cancelling open orders and
    pending order matches involving *asset*.
    """
    cursor = db.cursor()
    problems = []

    # TODO
    if not config.TESTNET:
        problems.append('callbacks are currently disabled on mainnet')
        return None, None, None, problems

    # TODO
    # Fraction must be in (0, 1]: you cannot call back more than everything.
    if fraction > 1:
        problems.append('fraction greater than one')
    elif fraction <= 0:
        problems.append('non‐positive fraction')

    issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?)''', ('valid', asset)))
    if not issuances:
        problems.append('no such asset, {}.'.format(asset))
        return None, None, None, problems
    else:
        # Only the current issuer of a callable asset may call it back,
        # and only on or after the recorded call date.
        last_issuance = issuances[-1]
        if last_issuance['issuer'] != source:
            problems.append('not asset owner')
            return None, None, None, problems
        if not last_issuance['callable']:
            problems.append('uncallable asset')
            return None, None, None, problems
        elif last_issuance['call_date'] > block_time: problems.append('before call date')
        call_price = round(last_issuance['call_price'], 6) # TODO: arbitrary
        divisible = last_issuance['divisible']

    if not divisible:   # Pay per output unit.
        call_price *= config.UNIT

    # If parsing, unescrow all funds of asset. (Order of operations is
    # important here.)
    if parse:
        # Cancel pending order matches involving asset.
        cursor.execute('''SELECT * from order_matches \
                          WHERE status = ? AND (forward_asset = ? OR backward_asset = ?)''', ('pending', asset, asset))
        for order_match in list(cursor):
            order.cancel_order_match(db, order_match, 'cancelled', block_index)

        # Cancel open orders involving asset.
        cursor.execute('''SELECT * from orders \
                          WHERE status = ? AND (give_asset = ? OR get_asset = ?)''', ('open', asset, asset))
        for order_element in list(cursor):
            order.cancel_order(db, order_element, 'cancelled', block_index)

    # Calculate callback quantities.
    holders = util.holders(db, asset)
    outputs = []
    for holder in holders:
        # If composing (and not parsing), predict funds to be returned from
        # escrow (instead of cancelling open offers, etc.), by *not* skipping
        # listing escrowed funds here.
        if parse and holder['escrow']:
            continue
        address = holder['address']
        address_quantity = holder['address_quantity']
        # The issuer's own holdings and empty balances are never called back.
        if address == source or address_quantity == 0: continue
        callback_quantity = int(address_quantity * fraction)   # Round down.
        fraction_actual = callback_quantity / address_quantity
        outputs.append({'address': address, 'address_quantity': address_quantity, 'callback_quantity': callback_quantity, 'fraction_actual': fraction_actual})

    callback_total = sum([output['callback_quantity'] for output in outputs])
    if not callback_total: problems.append('nothing called back')

    # The issuer must hold enough XPT to pay call_price for every called unit.
    balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, config.XPT)))
    if not balances or balances[0]['quantity'] < (call_price * callback_total):
        problems.append('insufficient funds')

    cursor.close()
    return call_price, callback_total, outputs, problems
def compose (db, source, fraction, asset):
    """Compose a callback transaction.

    Validates the callback against the current last block and, when valid,
    returns the ``(source, destinations, data)`` triple for the transaction:
    no destinations, and a data payload of the message-type ID followed by
    the packed fraction and asset ID.

    Raises exceptions.CallbackError when validation reports any problem.
    """
    _, callback_total, _, problems = validate(
        db, source, fraction, asset,
        util.last_block(db)['block_time'],
        util.last_block(db)['block_index'],
        parse=False)
    if problems:
        raise exceptions.CallbackError(problems)
    print('Total quantity to be called back:', util.devise(db, callback_total, asset, 'output'), asset)

    payload = struct.pack(config.TXTYPE_FORMAT, ID)
    payload += struct.pack(FORMAT, fraction, util.asset_id(asset))
    return (source, [], payload)
def parse (db, tx, message):
    """Apply a callback transaction *tx* carrying the packed *message*.

    Unpacks the (fraction, asset) payload, re-validates it against the
    transaction's block, moves funds between the issuer and the holders
    when valid, and always records a row in the ``callbacks`` table with
    the resulting status.
    """
    cursor = db.cursor()

    # Unpack message.
    try:
        if len(message) != LENGTH:
            raise exceptions.UnpackError
        fraction, asset_id = struct.unpack(FORMAT, message)
        asset = util.asset_name(asset_id)
        status = 'valid'
    except (exceptions.UnpackError, exceptions.AssetNameError, struct.error) as e:
        fraction, asset = None, None
        status = 'invalid: could not unpack'

    if status == 'valid':
        call_price, callback_total, outputs, problems = validate(db, tx['source'], fraction, asset, tx['block_time'], tx['block_index'], parse=True)
        if problems:
            status = 'invalid: ' + '; '.join(problems)

    if status == 'valid':
        # Issuer: pays XPT at the call price and receives the asset back.
        assert call_price * callback_total == int(call_price * callback_total)
        util.debit(db, tx['block_index'], tx['source'], config.XPT, int(call_price * callback_total), action='callback', event=tx['tx_hash'])
        util.credit(db, tx['block_index'], tx['source'], asset, callback_total, action='callback', event=tx['tx_hash'])

        # Holders: each gives up its called-back quantity and is paid in XPT.
        for output in outputs:
            assert call_price * output['callback_quantity'] == int(call_price * output['callback_quantity'])
            util.debit(db, tx['block_index'], output['address'], asset, output['callback_quantity'], action='callback', event=tx['tx_hash'])
            util.credit(db, tx['block_index'], output['address'], config.XPT, int(call_price * output['callback_quantity']), action='callback', event=tx['tx_hash'])

    # Add parsed transaction to message-type–specific table.
    bindings = {
        'tx_index': tx['tx_index'],
        'tx_hash': tx['tx_hash'],
        'block_index': tx['block_index'],
        'source': tx['source'],
        'fraction': fraction,
        'asset': asset,
        'status': status,
    }
    cursor.execute('insert into callbacks values(:tx_index, :tx_hash, :block_index, :source, :fraction, :asset, :status)', bindings)

    cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
hecchi777/S3-SlaacSecuritySolution | impacket-0.9.11/impacket/eap.py | Python | apache-2.0 | 1,437 | 0.011134 |
# Copyright (c) 2003-2013 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id$
#
# Description:
# EAP packets
#
# Author:
# Aureliano Calvo
from impacket.helper import ProtocolPacket, Byte, Word, Long, ThreeBytesBigEndian
DOT1X_AUTHENTICATION = 0x888E
class EAPExpanded(ProtocolPacket):
    """EAP expanded data according to RFC 3748, section 5.7.

    Layout: a 3-byte big-endian vendor ID at offset 0 followed by a
    big-endian vendor type at offset 3 (header_size 7, no trailer).
    """
    # Well-known vendor ID / vendor type values.
    WFA_SMI = 0x00372a
    SIMPLE_CONFIG = 0x00000001

    header_size = 7
    tail_size = 0

    vendor_id = ThreeBytesBigEndian(0)
    vendor_type = Long(3, ">")
class EAPR(ProtocolPacket):
    """It represents a request or a response in EAP (codes 1 and 2):
    a single type octet at offset 0 followed by type-specific data."""
    # EAP method types.
    IDENTITY = 0x01
    EXPANDED = 0xfe

    header_size = 1
    tail_size = 0

    type = Byte(0)
class EAP(ProtocolPacket):
    """EAP packet header: code, identifier and total length (big-endian)."""
    # EAP code values.
    REQUEST = 0x01
    RESPONSE = 0x02
    SUCCESS = 0x03
    FAILURE = 0x04

    header_size = 4
    tail_size = 0

    code = Byte(0)
    identifier = Byte(1)
    length = Word(2, ">")
class EAPOL(ProtocolPacket):
    """802.1X EAPOL frame: version, packet type and big-endian body length."""
    # EAPOL packet types.
    EAP_PACKET = 0x00
    EAPOL_START = 0x01
    EAPOL_LOGOFF = 0x02
    EAPOL_KEY = 0x03
    EAPOL_ENCAPSULATED_ASF_ALERT = 0x04

    # 802.1X protocol version carried in the version field.
    DOT1X_VERSION = 0x01

    header_size = 4
    tail_size = 0

    version = Byte(0)
    packet_type = Byte(1)
    body_length = Word(2, ">")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.