repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
gnrfan/django-mailgun-validation | mailgun_validation/exceptions.py | Python | bsd-3-clause | 44 | 0 | class | MailgunException(Exception):
pass
| |
quru/qis | src/imageserver/auxiliary/util.py | Python | agpl-3.0 | 3,767 | 0.002389 | #
# Quru Image Server
#
# Document: util.py
# Date started: 27 Mar 2018
# By: Matt Fozard
# Purpose: Auxiliary server utilities
# Requires:
# Copyright: Quru Ltd (www.quru.com)
# Licence:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Last Changed: $Date$ $Rev$ by $Author$
#
# Notable modifications:
# Date By Details
# ========= ==== ============================================================
# 27Mar2018 Matt Code moved here from task_server, refactored other servers
#
import os
import time
from multiprocessing import Process
from imageserver.filesystem_manager import get_abs_path
from imageserver.util import get_computer_hostname
def store | _pid(proc_name, pid_val):
"""
Writes the current process ID to a hidden file in the image server file system.
Raises an IOError or OSError if the pid file cannot be written.
"""
pid_dir = os.path.dirname(_get_pidfile_path(proc_name))
if not os.path.exists(pid_dir):
| os.mkdir(pid_dir)
with open(_get_pidfile_path(proc_name), 'wt', encoding='utf8') as f:
f.write(pid_val)
def get_pid(proc_name):
"""
Returns the last value written by _store_pid() as a string,
or an empty string if _store_pid() has not been called before.
Raises an IOError or OSError if the pid file exists but cannot be read.
"""
if os.path.exists(_get_pidfile_path(proc_name)):
with open(_get_pidfile_path(proc_name), 'rt', encoding='utf8') as f:
return f.read()
return ''
def _get_pidfile_path(proc_name):
"""
Returns a path for a PID file, incorporating the given process name and this
computer's host name (for the case when multiple servers are sharing the same
back-end file system).
"""
return os.path.join(
get_abs_path('/'),
'.metadata',
get_computer_hostname() + '.' + proc_name + '.pid'
)
def double_fork(process_name, process_function, process_args):
"""
Forks twice, leaving 'target_function' running as a separate grand-child process.
This fully detaches the final process from the parent process, avoiding issues
with the parent having to wait() for the child (or else becoming a zombie process)
or the parent's controlling terminal (if it has one) being closed. This is better
explained at: http://www.faqs.org/faqs/unix-faq/programmer/faq/
"""
def _double_fork():
p = Process(
target=process_function,
name=process_name,
args=process_args
)
# Do not kill the target_function process when this process exits
p.daemon = False
# Start target_function as the grand-child process
p.start()
# Force the child exit, leaving the grand-child still running.
# The parent process can now exit cleanly without waiting to wait() or join()
# on the grand-child (and it can't, since it knows nothing about it).
os._exit(0)
# Start _double_fork as the child process and wait() for it to complete (is quick)
p = Process(target=_double_fork)
p.start()
p.join()
|
HeathKang/flasky | app/exceptions.py | Python | mit | 90 | 0 | # | !/usr/bin/python
# -*- coding: utf-8 -*-
class ValidationError(ValueError):
pa | ss
|
skrah/ndtypes | python/ndt_randtype.py | Python | bsd-3-clause | 16,372 | 0.006719 | #
# BSD 3-Clause License
#
# Copyright (c) 2017-2018, plures
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Functions for generating test cases.
import sys
from itertools import accumulate, count, product
from collections import namedtuple
from random import randrange
from ndtypes import ndt, ApplySpec
from _testbuffer import get_sizeof_void_p
SIZEOF_PTR = get_sizeof_void_p()
Mem = namedtuple("Mem", "itemsize align")
# ======================================================================
# Check contiguous fixed dimensions
# ======================================================================
def c_datasize(t):
"""Check the datasize of contiguous arrays."""
datasize = t.itemsize
for v in t.shape:
datasize *= v
return datasize
# ======================================================================
# | Check fixed dimensions with arbitary strides
# ======================================================================
def verify_datasize(t):
"""Verify the datasize of fixed dimensions with arbitrary strides."""
if t.itemsize == 0:
return t.datasize == 0
if t.datasize % t.itemsize:
return False
if t.ndim <= 0:
return t.ndim == 0 and not t.shape and not t.strides
if any( | v < 0 for v in t.shape):
return False
if any(v % t.itemsize for v in t.strides):
return False
if 0 in t.shape:
return t.datasize == 0
imin = sum(t.strides[j]*(t.shape[j]-1) for j in range(t.ndim)
if t.strides[j] <= 0)
imax = sum(t.strides[j]*(t.shape[j]-1) for j in range(t.ndim)
if t.strides[j] > 0)
return t.datasize == (abs(imin) + imax + t.itemsize)
# ======================================================================
# Typed values
# ======================================================================
DTYPE_TEST_CASES = [
# Tuples
("()", Mem(itemsize=0, align=1)),
("(complex128)", Mem(itemsize=16, align=8)),
("(int8, int64)", Mem(itemsize=16, align=8)),
("(int8, int64, pack=1)", Mem(itemsize=9, align=1)),
("(int8, int64, pack=2)", Mem(itemsize=10, align=2)),
("(int8, int64, pack=4)", Mem(itemsize=12, align=4)),
("(int8, int64, pack=8)", Mem(itemsize=16, align=8)),
("(int8, int64, pack=16)", Mem(itemsize=32, align=16)),
("(int8, int64, align=1)", Mem(itemsize=16, align=8)),
("(int8, int64, align=2)", Mem(itemsize=16, align=8)),
("(int8, int64, align=4)", Mem(itemsize=16, align=8)),
("(int8, int64, align=8)", Mem(itemsize=16, align=8)),
("(int8, int64, align=16)", Mem(itemsize=16, align=16)),
("(int8 |align=1|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=2|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=4|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=8|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=16|, int64)", Mem(itemsize=16, align=16)),
("(uint16, (complex64))", Mem(itemsize=12, align=4)),
("(uint16, (complex64), pack=1)", Mem(itemsize=10, align=1)),
("(uint16, (complex64), pack=2)", Mem(itemsize=10, align=2)),
("(uint16, (complex64), pack=4)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), pack=8)", Mem(itemsize=16, align=8)),
("(uint16, (complex64), align=1)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=2)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=4)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=8)", Mem(itemsize=16, align=8)),
# References to tuples
("&(uint16, (complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("(uint16, &(complex64), pack=1)", Mem(itemsize=2+SIZEOF_PTR, align=1)),
# Constructor containing references to tuples
("Some(&(uint16, (complex64), align=1))", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("Some((uint16, &(complex64), pack=1))", Mem(itemsize=2+SIZEOF_PTR, align=1)),
# Optional tuples
("?(uint16, (complex64), align=1)", Mem(itemsize=12, align=4)),
("(uint16, ?(complex64), align=1)", Mem(itemsize=12, align=4)),
("?(uint16, ?(complex64), align=1)", Mem(itemsize=12, align=4)),
("?(uint16, (complex64), align=2)", Mem(itemsize=12, align=4)),
("(uint16, ?(complex64), align=4)", Mem(itemsize=12, align=4)),
("?(uint16, ?(complex64), align=8)", Mem(itemsize=16, align=8)),
# References to optional tuples or tuples with optional subtrees
("&?(uint16, (complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&(uint16, ?(complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# Constructor containing optional tuples or tuples with optional subtrees
("Some(?(uint16, (complex64), align=1))", Mem(itemsize=12, align=4)),
("Some((uint16, ?(complex64), align=1))", Mem(itemsize=12, align=4)),
# Records
("{}", Mem(itemsize=0, align=1)),
("{x: complex128}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64, pack=1}", Mem(itemsize=9, align=1)),
("{x: int8, y: int64, pack=2}", Mem(itemsize=10, align=2)),
("{x: int8, y: int64, pack=4}", Mem(itemsize=12, align=4)),
("{x: int8, y: int64, pack=8}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64, pack=16}", Mem(itemsize=32, align=16)),
("{x: uint16, y: {z: complex128}}", Mem(itemsize=24, align=8)),
("{x: uint16, y: {z: complex128, align=16}}", Mem(itemsize=32, align=16)),
("{x: uint16, y: {z: complex128}, align=16}", Mem(itemsize=32, align=16)),
# Primitive types
("bool", Mem(itemsize=1, align=1)),
("int8", Mem(itemsize=1, align=1)),
("int16", Mem(itemsize=2, align=2)),
("int32", Mem(itemsize=4, align=4)),
("int64", Mem(itemsize=8, align=8)),
("uint8", Mem(itemsize=1, align=1)),
("uint16", Mem(itemsize=2, align=2)),
("uint32", Mem(itemsize=4, align=4)),
("uint64", Mem(itemsize=8, align=8)),
("float32", Mem(itemsize=4, align=4)),
("float64", Mem(itemsize=8, align=8)),
("complex64", Mem(itemsize=8, align=4)),
("complex128", Mem(itemsize=16, align=8)),
# Primitive optional types
("?bool", Mem(itemsize=1, align=1)),
("?int8", Mem(itemsize=1, align=1)),
("?int16", Mem(itemsize=2, align=2)),
("?int32", Mem(itemsize=4, align=4)),
("?int64", Mem(itemsize=8, align=8)),
("?uint8", Mem(itemsize=1, align=1)),
("?uint16", Mem(itemsize=2, align=2)),
("?uint32", Mem(itemsize=4, align=4)),
("?uint64", Mem(itemsize=8, align=8)),
("?float32", Mem(itemsize=4, align=4)),
("?float64", Mem(itemsize=8, align=8)),
("?complex64", Mem(itemsize=8, align=4)),
("?complex128", Mem(itemsize=16, align=8)),
# References
(" |
zetaops/zengine | tests/views/jump_to.py | Python | gpl-3.0 | 692 | 0 | # -*- coding: utf-8 -*-
"""
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
# testing if we are preserving task_data between wf jumps
def main_wf(current):
current.task_data['from_main'] = True
current.output['from_jumped'] = current. | task_data.get('from_jumped')
assert current.workflow.name == 'jump_to_wf'
def jumped_wf(current):
current.output['from_main'] = current.task_data['from_main']
current.task_data['from_jumped'] = True
assert current.workflow.name == 'jump_to_wf2'
def set_external_wf(current):
curr | ent.task_data['external_wf'] = 'jump_to_wf2'
|
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tseries/tests/test_resample.py | Python | artistic-2.0 | 65,806 | 0.0031 | # pylint: disable=E1101
from datetime import datetime, timedelta
from functools import partial
from pandas.compat import range, lrange, zip, product
import numpy as np
from pandas import (Series, TimeSeries, DataFrame, Panel, Index,
isnull, notnull, Timestamp)
from pandas.core.groupby import DataError
from pandas.tseries.index import date_range
from pandas.tseries.tdi import timedelta_range
from pandas.tseries.offsets import Minute, BDay
from pandas.tseries.period import period_range, PeriodIndex, Period
from pandas.tseries.resample import DatetimeIndex, TimeGrouper
from pandas.tseries.frequencies import MONTHS, DAYS
import pandas.tseries.offsets as offsets
import pandas as pd
import nose
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
bday = BDay()
class TestResample(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
self.assertEqual(g.ngroups, 2593)
self.assertTrue(notnull(g.mean()).all())
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
self.assertEqual(len(r.columns), 10)
self.assertEqual(len(r.index), 2593)
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', how='mean', closed='right', label='right')
exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=exp_idx)
assert_series_equal(result, expected)
self.assertEqual(result.index.name, 'index')
result = s.resample('5min', how='mean', closed='left', label='right')
exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min', name='index')
expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()], index=exp_idx)
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min', how='last')
grouper = TimeGroupe | r(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_how(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00',
freq='min', name='index')
s = Series(np.random.randn(14), index=rng)
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
| args = ['sum', 'mean', 'std', 'sem', 'max', 'min',
'median', 'first', 'last', 'ohlc']
def _ohlc(group):
if isnull(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
inds = date_range('1/1/2000', periods=4, freq='5min', name='index')
for arg in args:
if arg == 'ohlc':
func = _ohlc
else:
func = arg
try:
result = s.resample('5min', how=arg,
closed='right', label='right')
expected = s.groupby(grouplist).agg(func)
self.assertEqual(result.index.name, 'index')
if arg == 'ohlc':
expected = DataFrame(expected.values.tolist())
expected.columns = ['open', 'high', 'low', 'close']
expected.index = Index(inds, name='index')
assert_frame_equal(result, expected)
else:
expected.index = inds
assert_series_equal(result, expected)
except BaseException as exc:
exc.args += ('how=%s' % arg,)
raise
def test_resample_how_callables(self):
# GH 7929
data = np.arange(5, dtype=np.int64)
ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
df = pd.DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class fn_class:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M", how=fn)
df_lambda = df.resample("M", how=lambda x: str(type(x)))
df_partial = df.resample("M", how=partial(fn))
df_partial2 = df.resample("M", how=partial(fn, a=2))
df_class = df.resample("M", how=fn_class())
assert_frame_equal(df_standard, df_lambda)
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
def test_resample_with_timedeltas(self):
expected = DataFrame({'A' : np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = pd.timedelta_range('0 days',freq='30T',periods=50)
df = DataFrame({'A' : np.arange(1480)},index=pd.to_timedelta(np.arange(1480),unit='T'))
result = df.resample('30T',how='sum')
assert_frame_equal(result, expected)
def test_resample_rounding(self):
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
from pandas.compat import StringIO
df = pd.read_csv(StringIO(data), parse_dates={'timestamp': ['date', 'time']}, index_col='timestamp')
df.index.name = None
result = df.resample('6s', how='sum')
expected = DataFrame({'value' : [4,9,4,2]},index=date_range('2014-11-08',freq='6s',periods=4))
assert_frame_equal(result,expected)
result = df.resample('7s', how='sum')
expected = DataFrame({'value' : [4,10,4,1]},index=date_range('2014-11-08',freq='7s',periods=4))
assert_frame_equal(result,expected)
result = df.resample('11s', how='sum')
expected = DataFrame({'value' : [11,8]},index=date_range('2014-11-08',freq='11s',periods=2))
assert_frame_equal(result,expected)
result = df.resample('13s', how='sum')
expected = DataFrame({'value' : [13,6]},index=date_range('2014-11-08',freq='13s',periods=2))
assert_frame_equal(result,expected)
result = df. |
9seconds/concierge | tests/test_endpoints_daemon.py | Python | mit | 3,504 | 0 | # -*- coding: utf-8 -*-
import errno
import itertools
import os
import os.path
import inotify_simple
import pytest
import concierge
import concierge.endpoints.cli as cli
import concierge.endpoints.daemon as daemon
import concierge.utils
def get_app(*params):
pars | er = cli.create_parser()
parser = | daemon.Daemon.specify_parser(parser)
parsed = parser.parse_args()
for param in params:
if param:
setattr(parsed, param.strip("-"), True)
app = daemon.Daemon(parsed)
return app
def test_create_app(cliargs_default, cliparam_systemd, cliparam_curlsh):
app = get_app(cliparam_systemd, cliparam_curlsh)
assert app.systemd == bool(cliparam_systemd)
assert app.curlsh == bool(cliparam_curlsh)
def test_print_help(capfd, cliargs_default, cliparam_curlsh):
app = get_app("--systemd", cliparam_curlsh)
app.do()
out, err = capfd.readouterr()
out = out.split("\n")
if cliparam_curlsh:
for line in out:
assert not line.startswith("$")
else:
assert line.startswith(("$", "Please")) or not line
assert not err
@pytest.mark.parametrize(
"main_method", (
True, False))
def test_work(mock_mainfunc, ptmpdir, main_method):
_, _, inotifier = mock_mainfunc
app = get_app()
app.destination_path = ptmpdir.join("filename").strpath
if main_method:
app.do()
else:
app.track()
inotifier.add_watch.assert_called_once_with(
os.path.dirname(concierge.DEFAULT_RC), daemon.INOTIFY_FLAGS)
assert not inotifier.v
with concierge.utils.topen(ptmpdir.join("filename").strpath) as filefp:
assert 1 == sum(int(line.strip() == "Host *") for line in filefp)
def test_track_no_our_events(no_sleep, mock_mainfunc, ptmpdir):
_, _, inotifier = mock_mainfunc
inotifier.v.clear()
inotifier.v.extend([inotify_simple.Event(0, 0, 0, "Fake")] * 3)
app = get_app()
app.destination_path = ptmpdir.join("filename").strpath
app.track()
assert not os.path.exists(ptmpdir.join("filename").strpath)
def test_track_cannot_read(no_sleep, mock_mainfunc, ptmpdir):
_, _, inotifier = mock_mainfunc
def add_watch(*args, **kwargs):
exc = IOError("Hello?")
exc.errno = errno.EPERM
raise exc
inotifier.add_watch.side_effect = add_watch
app = get_app()
app.destination_path = ptmpdir.join("filename").strpath
with pytest.raises(IOError):
app.track()
@pytest.mark.parametrize(
"ev1, ev2",
list(itertools.permutations(inotify_simple.flags, 2)))
def test_event_names(ev1, ev2):
events = [
inotify_simple.Event(0, ev1, 0, "ev1"),
inotify_simple.Event(0, ev2, 0, "ev2"),
inotify_simple.Event(0, ev1 | ev2, 0, "ev1ev2")]
descriptions = daemon.Daemon.describe_events(events)
assert len(descriptions) == len(events)
assert "ev1" in descriptions[0]
assert str(ev1) in descriptions[0]
assert "ev2" in descriptions[1]
assert str(ev2) in descriptions[1]
assert "ev1" in descriptions[2]
assert "ev2" in descriptions[2]
assert str(ev1) in descriptions[2]
assert str(ev2) in descriptions[2]
def test_mainfunc_ok(mock_mainfunc):
result = daemon.main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(mock_mainfunc):
_, _, inotifier = mock_mainfunc
inotifier.read.side_effect = Exception
result = daemon.main()
assert result != os.EX_OK
|
viggates/nova | nova/tests/api/openstack/compute/contrib/test_agents.py | Python | apache-2.0 | 10,887 | 0.003215 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack.compute.contrib import agents
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import test
fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'id': 4},
]
def fake_agent_build_get_all(context, hypervisor):
agent_build_all = []
for agent in fake_agents_list:
if hypervisor and hypervisor != agent['hypervisor']:
continue
agent_build_ref = models.AgentBuild()
agent_build_ref.update(agent)
agent_build_all.append(agent_build_ref)
return agent_build_all
def fake_agent_build_update(context, agent_build_id, values):
pass
def fake_agent_build_destroy(context, agent_update_id):
pass
def fake_agent_build_create(context, values):
values['id'] = 1
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
return agent_build_ref
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
GET = {}
class FakeRequestWithHypervisor(object):
environ = {"nova.context": context.get_admin_context()}
GET = {'hypervisor': 'kvm'}
class AgentsTest(test.NoDBTestCase):
def setUp(self):
super(AgentsTest, self).setUp()
self.stubs.Set(db, "agent_build_get_all",
fake_agent_build_get_all)
self.stubs.Set(db, "agent_build_update",
fake_agent_build_update)
self.stubs.Set(db, "agent_build_destroy",
fake_agent_build_destroy)
self.stubs.Set(db, "agent_build_create",
fake_agent_build_create)
self.context = context.get_admin_context()
self.controller = agents.AgentController()
def test_agents_create(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1}}
res_dict = self.controller.create(req, body)
self.assertEqual(res_dict, response)
def test_agents_create_key_error(self):
req = FakeRequest()
body = {'agent': {'hypervisordummy': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_agents_create_with_existed_agent(self):
def fake_agent_build_create_with_exited_agent(context, values):
raise exception.AgentBuildExists(**values)
self.stubs.Set(db, 'agent_build_create',
fake_agent_build_create_with_exited_agent)
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req,
body=body)
def _test_agents_create_with_invalid_length(self, key):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_agents_create_with_invalid_length_hypervisor(self):
self._test_agents_create_with_invalid_length('hypervisor')
def test_agents_create_with_invalid_length_os(self):
self._test_agents_create_with_invalid_length('os')
def test_agents_create_with_invalid_length_architecture(self):
self._test_agents_create_with_invalid_length('architecture')
def test_agents_create_with_invalid_length_version(self):
self._test_agents_create_with_invalid_length('version')
def test_agents_create_with_invalid_length_url(self):
self._test_agents_create_with_invalid_length('url')
def test_agents_create_with_invalid_length_md5hash(self):
self._test_agents_create_with_invalid_length('md5hash')
def test_agents_delete(self):
req = FakeRequest()
self.controller.delete(req, 1)
def test_agents_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx2',
'md5hash': 'add6bb58 | e139be103324d04d82d8f547',
'agent_id': 3},
{'hypervisor': 'xen', 'os': 'win',
| 'architecture': 'power',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'agent_id': 4},
]
self.assertEqual(res_dict, {'agents': agents_list})
def test_agents_list_with_hypervisor(self):
|
obsrvbl/kinesis-logs-reader | kinesis_logs_reader/kinesis_logs_reader.py | Python | apache-2.0 | 3,798 | 0 | # Copyright 2016 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from json import loads
from boto3.session import Session
from .utils import gunzip_bytes
class KinesisLogsReader(object):
def __init__(
self, stream_name, start_time=None, kinesis_client=None, **kwargs
):
"""
Return an iterable object that will yield log events as Python dicts.
* `stream_name` identifies the Kinesis stream with a log subscription.
* `start_time` is a Python datetime object; Records from this Timestamp
on will be returned. By default the latest records will be returned.
* `kinesis_client` is a boto3.client object. By default one will be
created with the given `kwargs`.
"""
self.kinesis_client = kinesis_client or self._get_client(**kwargs)
self.stream_name = stream_name
self.shard_ids = list(self._get_shard_ids())
self.shard_iterators = {}
for shard_id in self.shard_ids:
self.shard_iterators[shard_id] = self._get_shard_iterator(
shard_id, start_time
)
self.shard_finished = {shard_id: False for shard_id in self.shard_ids}
self.iterator = self._reader()
def __iter__(self):
return self
def __next__(self):
return next(self.iterator)
def next(self):
# For Python 2 compatibility
return self.__next__()
def _get_client(self, **kwargs):
return Session(**kwargs).client('kinesis')
def _get_shard_ids(self):
paginator = self.kinesis_client.get_paginator('describe_stream')
for page in paginator.paginate(StreamName=self.stream_nam | e):
stream_description = page.get('StreamDescription', {})
for shard in stream_description.get('Shards', []):
yield shard['ShardId']
def _get_shard_iterator(self, shard_id, start_time=None):
kwargs = {'StreamName': self.stream_name, 'ShardId': shard_id}
if start_time is None:
kwargs['ShardIteratorType'] = 'LATEST'
else:
kwargs['ShardIteratorType'] = 'AT_TIMESTAMP'
kwargs['Timestamp'] = st | art_time
response = self.kinesis_client.get_shard_iterator(**kwargs)
return response['ShardIterator']
def _read_shard(self, shard_id):
iterator = self.shard_iterators[shard_id]
response = self.kinesis_client.get_records(ShardIterator=iterator)
self.shard_iterators[shard_id] = response['NextShardIterator']
self.shard_finished[shard_id] = response['MillisBehindLatest'] == 0
for record in response.get('Records', []):
gz_data = record['Data']
raw_data = gunzip_bytes(gz_data)
data = loads(raw_data.decode('utf-8'))
if data.get('messageType') != 'DATA_MESSAGE':
continue
for flow_record in data.get('logEvents', []):
yield flow_record['extractedFields']
def _reader(self):
while True:
for shard_id in self.shard_ids:
for item in self._read_shard(shard_id):
yield item
if all(self.shard_finished.values()):
break
|
xifle/home-assistant | homeassistant/components/cover/demo.py | Python | mit | 5,129 | 0 | """
Demo platform for the cover component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from homeassistant.components.cover import CoverDevice
from homeassistant.helpers.event import track_utc_time_change
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Register the three demo covers with Home Assistant."""
    demo_covers = [
        DemoCover(hass, 'Kitchen Window'),
        DemoCover(hass, 'Hall Window', 10),
        DemoCover(hass, 'Living Room Window', 70, 50),
    ]
    add_devices(demo_covers)
class DemoCover(CoverDevice):
    """Representation of a demo cover.

    Position and tilt run from 0 (closed) to 100 (open) in steps of 10;
    None means the cover does not support that feature. Movement is
    simulated by per-second UTC time-change callbacks.
    """

    # pylint: disable=no-self-use
    def __init__(self, hass, name, position=None, tilt_position=None):
        """Initialize the cover."""
        self.hass = hass
        self._name = name
        self._position = position
        # Pending move targets (rounded to the nearest ten); None when idle.
        self._set_position = None
        self._set_tilt_position = None
        self._tilt_position = tilt_position
        # Direction flags used by the time-change callbacks below.
        self._closing = True
        self._closing_tilt = True
        # Unsubscribe handles for the movement listeners (None = not moving).
        self._unsub_listener_cover = None
        self._unsub_listener_cover_tilt = None

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed for a demo cover."""
        return False

    @property
    def current_cover_position(self):
        """Return the current position of the cover."""
        return self._position

    @property
    def current_cover_tilt_position(self):
        """Return the current tilt position of the cover."""
        return self._tilt_position

    @property
    def is_closed(self):
        """Return if the cover is closed."""
        if self._position is not None:
            if self.current_cover_position > 0:
                return False
            else:
                return True
        else:
            # Position unsupported: closed state is unknown.
            return None

    def close_cover(self, **kwargs):
        """Close the cover."""
        if self._position in (0, None):
            return  # already closed or position unsupported
        self._listen_cover()
        self._closing = True

    def close_cover_tilt(self, **kwargs):
        """Close the cover tilt."""
        if self._tilt_position in (0, None):
            return
        self._listen_cover_tilt()
        self._closing_tilt = True

    def open_cover(self, **kwargs):
        """Open the cover."""
        if self._position in (100, None):
            return  # already fully open or position unsupported
        self._listen_cover()
        self._closing = False

    def open_cover_tilt(self, **kwargs):
        """Open the cover tilt."""
        if self._tilt_position in (100, None):
            return
        self._listen_cover_tilt()
        self._closing_tilt = False

    def set_cover_position(self, position, **kwargs):
        """Move the cover to a specific position."""
        self._set_position = round(position, -1)
        if self._position == position:
            return
        self._listen_cover()
        self._closing = position < self._position

    def set_cover_tilt_position(self, tilt_position, **kwargs):
        """Move the cover til to a specific position."""
        self._set_tilt_position = round(tilt_position, -1)
        if self._tilt_position == tilt_position:
            return
        self._listen_cover_tilt()
        self._closing_tilt = tilt_position < self._tilt_position

    def stop_cover(self, **kwargs):
        """Stop the cover."""
        if self._position is None:
            return
        if self._unsub_listener_cover is not None:
            self._unsub_listener_cover()
            self._unsub_listener_cover = None
            self._set_position = None

    def stop_cover_tilt(self, **kwargs):
        """Stop the cover tilt."""
        if self._tilt_position is None:
            return
        if self._unsub_listener_cover_tilt is not None:
            self._unsub_listener_cover_tilt()
            self._unsub_listener_cover_tilt = None
            self._set_tilt_position = None

    def _listen_cover(self):
        """Listen for changes in cover."""
        if self._unsub_listener_cover is None:
            self._unsub_listener_cover = track_utc_time_change(
                self.hass, self._time_changed_cover)

    def _time_changed_cover(self, now):
        """Track time changes."""
        # Step 10 units per tick; stop at the end stops or the requested
        # target position.
        if self._closing:
            self._position -= 10
        else:
            self._position += 10
        if self._position in (100, 0, self._set_position):
            self.stop_cover()
        self.update_ha_state()

    def _listen_cover_tilt(self):
        """Listen for changes in cover tilt."""
        if self._unsub_listener_cover_tilt is None:
            self._unsub_listener_cover_tilt = track_utc_time_change(
                self.hass, self._time_changed_cover_tilt)

    def _time_changed_cover_tilt(self, now):
        """Track time changes."""
        if self._closing_tilt:
            self._tilt_position -= 10
        else:
            self._tilt_position += 10
        if self._tilt_position in (100, 0, self._set_tilt_position):
            self.stop_cover_tilt()
        self.update_ha_state()
|
opennode/nodeconductor-openstack | src/waldur_openstack/openstack/tests/test_service_project_link.py | Python | mit | 3,839 | 0.003126 | from rest_framework import test, status
from waldur_core.structure.models import CustomerRole, ProjectRole
from waldur_core.structure.tests import factories as structure_factories
from . import factories
class ServiceProjectLinkPermissionTest(test.APITransactionTestCase):
    """Permission checks for creating/deleting OpenStack service-project links."""

    def setUp(self):
        # Users covering every role involved in the fixtures below.
        self.users = {
            'owner': structure_factories.UserFactory(),
            'admin': structure_factories.UserFactory(),
            'manager': structure_factories.UserFactory(),
            'no_role': structure_factories.UserFactory(),
            'not_connected': structure_factories.UserFactory(),
        }
        # a single customer
        self.customer = structure_factories.CustomerFactory()
        self.customer.add_user(self.users['owner'], CustomerRole.OWNER)
        # that has 3 users connected: admin, manager
        self.connected_project = structure_factories.ProjectFactory(customer=self.customer)
        self.connected_project.add_user(self.users['admin'], ProjectRole.ADMINISTRATOR)
        self.connected_project.add_user(self.users['manager'], ProjectRole.MANAGER)
        # has defined a service and connected service to a project
        self.service = factories.OpenStackServiceFactory(customer=self.customer)
        self.service_project_link = factories.OpenStackServiceProjectLinkFactory(
            project=self.connected_project,
            service=self.service)
        # the customer also has another project with users but without a permission link
        self.not_connected_project = structure_factories.ProjectFactory(customer=self.customer)
        self.not_connected_project.add_user(self.users['not_connected'], ProjectRole.ADMINISTRATOR)
        self.not_connected_project.save()
        self.url = factories.OpenStackServiceProjectLinkFactory.get_list_url()

    def test_anonymous_user_cannot_grant_service_to_project(self):
        # No authentication at all -> 401.
        response = self.client.post(self.url, self._get_valid_payload())
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_user_can_connect_service_and_project_he_owns(self):
        user = self.users['owner']
        self.client.force_authenticate(user=user)
        service = factories.OpenStackServiceFactory(customer=self.customer)
        project = structure_factories.ProjectFactory(customer=self.customer)
        payload = self._get_valid_payload(service, project)
        response = self.client.post(self.url, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_admin_cannot_connect_new_service_and_project_if_he_is_project_admin(self):
        user = self.users['admin']
        self.client.force_authenticate(user=user)
        service = factories.OpenStackServiceFactory(customer=self.customer)
        project = self.connected_project
        payload = self._get_valid_payload(service, project)
        response = self.client.post(self.url, payload)
        # the new service should not be visible to the user
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertDictContainsSubset(
            {'service': ['Invalid hyperlink - Object does not exist.']}, response.data)

    def test_user_cannot_revoke_service_and_project_permission_if_he_is_project_manager(self):
        user = self.users['manager']
        self.client.force_authenticate(user=user)
        url = factories.OpenStackServiceProjectLinkFactory.get_url(self.service_project_link)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def _get_valid_payload(self, service=None, project=None):
        # Presumably get_url(None) creates a fresh object and returns its
        # URL -- verify against the factory implementation.
        return {
            'service': factories.OpenStackServiceFactory.get_url(service),
            'project': structure_factories.ProjectFactory.get_url(project)
        }
|
jdmcbr/Shapely | docs/code/buffer.py | Python | bsd-3-clause | 1,095 | 0.00274 | from matplotlib import pyplot
from shapely.geometry import LineString
from descartes import PolygonPatch
from figures import SIZE, BLUE, GRAY, set_limits, plot_line
# Demonstrates Shapely's buffer(): positive distance dilates a line into a
# polygon, negative distance erodes a polygon.
line = LineString([(0, 0), (1, 1), (0, 2), (2, 2), (3, 1), (1, 0)])
fig = pyplot.figure(1, figsize=SIZE, dpi=90)
# 1: dilation with square (flat) caps, drawn in the left subplot.
ax = fig.add_subplot(121)
plot_line(ax, line)
dilated = line.buffer(0.5, cap_style=3)
patch1 = PolygonPatch(dilated, fc=BLUE, ec=BLUE, alpha=0.5, zorder=2)
ax.add_patch(patch1)
ax.set_title('a) dilation, cap_style=3')
set_limits(ax, -1, 4, -1, 3)
# 2: erosion (negative buffer) of the dilated shape, right subplot; the
# original dilated outline is kept in gray for comparison.
ax = fig.add_subplot(122)
patch2a = PolygonPatch(dilated, fc=GRAY, ec=GRAY, alpha=0.5, zorder=1)
ax.add_patch(patch2a)
eroded = dilated.buffer(-0.3)
# GeoJSON-like data works as well
polygon = eroded.__geo_interface__
# >>> geo['type']
# 'Polygon'
# >>> geo['coordinates'][0][:2]
# ((0.50502525316941682, 0.78786796564403572), (0.5247963548222736, 0.8096820147509064))
patch2b = PolygonPatch(polygon, fc=BLUE, ec=BLUE, alpha=0.5, zorder=2)
ax.add_patch(patch2b)
ax.set_title('b) erosion, join_style=1')
set_limits(ax, -1, 4, -1, 3)
pyplot.show()
|
divio/askbot-devel | askbot/tests/__init__.py | Python | gpl-3.0 | 1,266 | 0 | from askbot.tests.cache_tests import *
from askbot.tests.email_alert_tests import *
from askbot.tests.on_screen_notification_tests import *
from askbot.tests.page_load_tests import *
from askbot.tests.permission_assertion_tests import *
from askbot.tests.db_api_tests import *
from askbot.tests.skin_tests import *
from askbot.tests.badge_tests import *
from askbot.tests.management_command_tests import *
from askbot.tests.search_state_tests import *
from askbot.tests.form_tests import *
from askbot.tests.follow_tests import *
from askbot.tests.markup_test import *
from askbot.tests.post_model_te | sts import *
from askbot.tests.thread_model_tests import *
from askbot.tests.reply_by_email_tests import *
from askbot.tests.haystack_search_tests import *
from askbot.tests.email_parsing_tests import *
from askbot.tests.widget_tests import *
from askbot.tests.category_tree_tests import CategoryTreeTests
from askbot.tests.question_views_tests import *
from askbot.tests.user_model_tests import UserModelTests
fro | m askbot.tests.user_views_tests import *
from askbot.tests.utils_tests import *
from askbot.tests.view_context_tests import *
from askbot.tests.api_v1_tests import *
from askbot.tests.jive_tests import *
from askbot.tests.signal_handler_tests import *
|
linsalrob/EdwardsLab | Flinders/plot_scores_3d.py | Python | mit | 1,397 | 0.002863 | """
Plot a 3D scatter plot of the alignment scores from alignment_score.py
"""
import os
import random
import sys
import matplotlib.lines
import argparse
import matplotlib.colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
import tkinter as tk
from tkinter import filedialog
if __name__ == '__main__':
    # Command-line parsing kept below for reference; a Tk file picker is
    # used instead so the script can be launched without arguments.
    """
    parser = argparse.ArgumentParser(description="Plot a 3D scatter plot of the alignment scores from alignment_score.py")
    parser.add_argument('-f', help='text separated data file output from alignment_score.py', required=True)
    args = parser.parse_args()
    filename = args.f
    """
    root = tk.Tk()
    root.withdraw()  # hide the empty root window; only show the dialog
    filename = filedialog.askopenfilename()
    x = []
    y = []
    z = []
    legends = ['seq1', 'seq2', '1-mer', '2-mer', '3-mer']
    with open(filename, 'r') as fin:
        for l in fin:
            # Tab-separated columns: seq1, seq2, then 1-/2-/3-mer scores.
            p = l.strip().split("\t")
            x.append(float(p[2]))
            y.append(float(p[3]))
            z.append(float(p[4]))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(x, y, z)
    ax.legend()
    ax.set_xlim(0, max(x))
    ax.set_ylim(0, max(y))
    ax.set_zlim(0, max(z))
    ax.set_xlabel(legends[2])
    ax.set_ylabel(legends[3])
    ax.set_zlabel(legends[4])
    # NOTE(review): hard-coded, user-specific output path -- consider
    # making this configurable.
    pickle.dump(fig, open('/home/redwards/Desktop/3dfig.pickle', 'wb'))
    plt.show()
|
ykaneko/quantum | quantum/api/v2/base.py | Python | apache-2.0 | 29,494 | 0.000102 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import webob.exc
from oslo.config import cfg
from quantum.api import api_common
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.api.v2 import attributes
from quantum.api.v2 import resource as wsgi_resource
from quantum.common import exceptions
from quantum.openstack.common import log as logging
from quantum.openstack.common.notifier import api as notifier_api
from quantum import policy
from quantum import quota
LOG = logging.getLogger(__name__)
# Translate internal quantum/netaddr exceptions into the webob HTTP faults
# returned to API clients.
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
             exceptions.Conflict: webob.exc.HTTPConflict,
             exceptions.InUse: webob.exc.HTTPConflict,
             exceptions.BadRequest: webob.exc.HTTPBadRequest,
             exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
             exceptions.NotAuthorized: webob.exc.HTTPForbidden,
             netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
             }
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
    def __init__(self, plugin, collection, resource, attr_info,
                 allow_bulk=False, member_actions=None, parent=None,
                 allow_pagination=False, allow_sorting=False):
        """Build a WSGI controller for one API collection.

        :param plugin: core plugin providing get/create/update/delete handlers
        :param collection: plural resource name (dashes become underscores)
        :param resource: singular resource name
        :param attr_info: attribute map describing the resource's fields
        :param parent: optional parent resource descriptor with 'member_name'
        """
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        # Attributes the policy engine needs even when the caller did not
        # request them (see _do_field_list).
        self._policy_attrs = [name for (name, info) in self._attr_info.items()
                              if info.get('required_by_policy')]
        self._publisher_id = notifier_api.publisher_id('network')
        self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination need native sorting support
            if not self._native_sorting:
                raise Exception(_("Native pagination depend on native "
                                  "sorting"))
            if not self._allow_sorting:
                LOG.info(_("Allow sorting is enabled because native "
                           "pagination requires native sorting"))
                self._allow_sorting = True
        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        # Map controller actions to the plugin method names serving them,
        # e.g. 'list' -> 'get_<parent>_<collection>'.
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in self._attr_info.iteritems():
if value.get('primary_key', False):
return key
return default_primary_key
    def _is_native_bulk_supported(self):
        # Plugins advertise native bulk support via a name-mangled class
        # attribute _<PluginClass>__native_bulk_support.
        native_bulk_attr_name = ("_%s__native_bulk_support"
                                 % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_bulk_attr_name, False)
    def _is_native_pagination_supported(self):
        # Same name-mangled convention as _is_native_bulk_supported.
        native_pagination_attr_name = ("_%s__native_pagination_support"
                                       % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_pagination_attr_name, False)
    def _is_native_sorting_supported(self):
        # Same name-mangled convention as _is_native_bulk_supported.
        native_sorting_attr_name = ("_%s__native_sorting_support"
                                    % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_sorting_attr_name, False)
    def _is_visible(self, context, attr_name, data):
        """Return whether attr_name may be shown to this request context."""
        action = "%s:%s" % (self._plugin_handlers[self.SHOW], attr_name)
        # Optimistically init authz_check to True
        authz_check = True
        try:
            attr = (attributes.RESOURCE_ATTRIBUTE_MAP
                    [self._collection].get(attr_name))
            if attr and attr.get('enforce_policy'):
                authz_check = policy.check_if_exists(
                    context, action, data)
        except KeyError:
            # The extension was not configured for adding its resources
            # to the global resource attribute map. Policy check should
            # not be performed
            LOG.debug(_("The resource %(resource)s was not found in the "
                        "RESOURCE_ATTRIBUTE_MAP; unable to perform authZ "
                        "check for attribute %(attr)s"),
                      {'resource': self._collection,
                       'attr': attr})
        except exceptions.PolicyRuleNotFound:
            LOG.debug(_("Policy rule:%(action)s not found. Assuming no "
                        "authZ check is defined for %(attr)s"),
                      {'action': action,
                       'attr': attr_name})
        # Visible only if declared visible AND the policy check passed.
        attr_val = self._attr_info.get(attr_name)
        return attr_val and attr_val['is_visible'] and authz_check
    def _view(self, context, data, fields_to_strip=None):
        """Project `data` down to the fields visible to `context`.

        :param fields_to_strip: extra field names to drop from the result.
        """
        # make sure fields_to_strip is iterable
        if not fields_to_strip:
            fields_to_strip = []
        return dict(item for item in data.iteritems()
                    if (self._is_visible(context, item[0], data) and
                        item[0] not in fields_to_strip))
    def _do_field_list(self, original_fields):
        """Ensure policy-required attributes are part of the field list.

        Returns the (possibly extended) field list, plus the names added
        purely for policy evaluation so they can be stripped afterwards.
        """
        fields_to_add = None
        # don't do anything if fields were not specified in the request
        if original_fields:
            fields_to_add = [attr for attr in self._policy_attrs
                             if attr not in original_fields]
            original_fields.extend(self._policy_attrs)
        return original_fields, fields_to_add
def __getattr__(self, name):
if name in self._member_actions:
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Fetch the resource and verify if the user can access it
try:
resource = self._item(request, id, True)
except exceptions.PolicyNotAuthorized:
raise webob.exc.HTTPNotFound()
body = kwargs.pop('body', None)
# Explicit comparison with None to distinguish from {}
if body is not None:
arg_list.append(body)
# TODO(salvatore-orlando): bp/make-authz-ortogonal
# The body of the action request should be included
# in the info passed to the policy engine
# Enforce policy, if any, for this action
# It is ok to raise a 403 because accessibility to the
# object was checked earlier in this method
policy.en |
JoseALermaIII/python-tutorials | docs/source/conf.py | Python | mit | 6,850 | 0.001898 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../pythontutorials/books/'))
from pythontutorials import __version__
# -- Project information -----------------------------------------------------
project = 'python-tutorials'
copyright = '2018, Jose A. Lerma III'
author = 'Jose A. Lerma III'
# The short X.Y version
version = '.'.join(__version__.split('.', 2)[:2])
# | The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extens | ions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-tutorialsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_engine = 'xelatex'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
# Include babel package
#
'babel': r'\usepackage{babel}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'python-tutorials.tex', 'python-tutorials Documentation',
'Jose A. Lerma III', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'python-tutorials', 'python-tutorials Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'python-tutorials', 'python-tutorials Documentation',
author, 'python-tutorials', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
def setup(app):
    """Sphinx extension hook: attach the project's custom stylesheet."""
    # NOTE(review): add_stylesheet is deprecated in newer Sphinx releases
    # in favour of add_css_file -- confirm the pinned Sphinx version before
    # migrating.
    app.add_stylesheet('css/custom.css')
# Mock top-level imports outside of module's main()
autodoc_mock_imports = ['docx', 'twilio', 'bs4', 'logging', 'imapclient', 'pyzmail', 'pyautogui']
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {'python': ('https://docs.python.org/3/', None),
'requests': ('http://docs.python-requests.org/en/latest/', None),
'bs4': ('https://www.crummy.com/software/BeautifulSoup/bs4/doc/', None),
'imapclient': ('https://imapclient.readthedocs.io/en/2.1.0/', None),
'openpyxl': ('https://openpyxl.readthedocs.io/en/stable/', None),
'PyPDF2': ('https://pythonhosted.org/PyPDF2/', None),
'docx': ('https://python-docx.readthedocs.io/en/latest/', None),
'pyperclip': ('https://pyperclip.readthedocs.io/en/latest/', None),
'PIL': ('https://pillow.readthedocs.io/en/5.3.x/', None),
'PyAutoGUI': ('https://pyautogui.readthedocs.io/en/latest/', None),
}
|
pbugnion/bibscrape | handle_jcp.py | Python | lgpl-3.0 | 844 | 0.007109 |
"""
Hack to extract links from JCP search r | esults.
$ handle.py <file_name>
will return a link to the first result of a JCP search.
If there are no search results, gives a return code of 4. |
If there is at least one search result, prints the href in
the "<a title="Link to Article" href="..."> tag.
"""
import sys
from HTMLParser import HTMLParser
# Module-level flag set to True by the parser when a matching link is found.
found = False


class Parser(HTMLParser):
    """Print the href of every anchor titled 'Link to Article'."""

    def handle_starttag(self,tag,attrs):
        global found
        if tag == "a":
            if ("title","Link to Article") in attrs:
                found = True
                for attr_name, attr_value in attrs:
                    if attr_name == "href":
                        # Drop any ';...' suffix (e.g. jsessionid) from the URL.
                        print attr_value.split(";")[0]
print attr_value.split(";")[0]
parser = Parser()
with open(sys.argv[1]) as f:
html_text = f.read()
parser.feed(html_text)
if not found:
sys.exit(4)
|
dnarvaez/osbuild | osbuild/utils.py | Python | apache-2.0 | 983 | 0 | # Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import tty
import termios
# Shared write handle for discarding output; opened once at import time.
devnull = open("/dev/null", "w")
def ensure_dir(path):
    """Create *path* (including parents) if it does not already exist.

    Raises:
        OSError: if creation fails for any reason other than the directory
            already existing (e.g. permission denied).
    """
    try:
        os.makedirs(path)
    except OSError:
        # Only swallow the "already exists" case; the previous blanket
        # `pass` also hid real failures such as EACCES.
        if not os.path.isdir(path):
            raise
def getch():
    """Read one character from stdin without waiting for Enter.

    Temporarily switches the terminal to raw mode and always restores the
    previous settings, even if the read raises.
    """
    fd = sys.stdin.fileno()
    tty_attributes = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        return sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, tty_attributes)
|
wrobstory/sticky | sticky/core.py | Python | mit | 2,171 | 0 | # -*- coding: utf-8 -*-
"""
Sticky
-------
Python + IPython + D3
"""
from __future__ import print_function
from __future__ import division
import json
from uuid import uuid4
from IPython.core.display import display, Javascript
from IPython.html import widgets
from IPython.utils.traitlets import Unicode
from jinja2 import Environment, PackageLoader
ENV = Environment(loader=PackageLoader('sticky', 'templates'))
def initialize_notebook():
    """Inject the library's JS and CSS bootstrap templates into the notebook.

    Prints a warning and returns without side effects when IPython is not
    available (previously execution fell through and raised NameError on
    the unbound ``HTML`` name).
    """
    try:
        from IPython.core.display import HTML
    except ImportError:
        print("IPython Notebook could not be loaded.")
        return
    lib_js = ENV.get_template('ipynb_init_js.html')
    lib_css = ENV.get_template('ipynb_init_css.html')
    display(HTML(lib_js.render()))
    display(HTML(lib_css.render()))
class StickyDOMWidget(widgets.DOMWidget):
    """DOM Creator Widget"""

    # Name of the Backbone view on the JS side; synced to the front end.
    _view_name = Unicode('StickyDOMWidget', sync=True)
    # Unique id linking this widget to its rendered chart element.
    chart_id = Unicode(sync=True)
class Chart(object):
    """Base class for Sticky charts rendered through an IPython widget.

    Subclasses are expected to provide ``kind`` and ``render_template``
    attributes -- TODO confirm against concrete chart classes.
    """

    def _get_dom_widget(self):
        """Instance of MicropolarDOM"""
        dom_widget = StickyDOMWidget()
        dom_widget.chart_id = self.chart_id
        return dom_widget

    def init_widget_js(self):
        """Init Widget JS"""
        # Emit both the generic widget JS and the chart-specific template.
        dom_template = ENV.get_template('sticky_dom_widget.js')
        display(Javascript(dom_template.render()))
        display(Javascript(self.render_template.render()))

    def _set_chart_attrs(self, **kwargs):
        """Set chart k with value v"""
        for k, v in kwargs.items():
            if k == 'data':
                v = json.dumps(v)  # data is serialized for the JS side
            # Traitlets on the view model are named model_<attr>.
            model_name = '_'.join(['model', k])
            setattr(self, model_name, v)

    def update(self, **kwargs):
        """Update View Model with given keywords"""
        self._set_chart_attrs(**kwargs)
        self.send_state()

    def plot(self):
        """Plot Sticky chart"""
        def render_chart(widget, **kwargs):
            display(self)
        # Unique id so several charts can coexist in one notebook.
        self.chart_id = '_'.join([self.kind, uuid4().hex])
        self.init_widget_js()
        dom_widget = self._get_dom_widget()
        dom_widget.on_displayed(render_chart)
        display(dom_widget)
|
cocosli/antlr4 | runtime/Python2/src/antlr4/BufferedTokenStream.py | Python | bsd-3-clause | 11,959 | 0.005101 | #
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This implementation of {@link TokenStream} loads tokens from a
# {@link TokenSource} on-demand, and places the tokens in a buffer to provide
# access to any previous token by index.
#
# <p>
# This token stream ignores the value of {@link Token#getChannel}. If your
# parser requires the token stream filter tokens to only those on a particular
# channel, such as {@link Token#DEFAULT_CHANNEL} or
# {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such a
# {@link CommonTokenStream}.</p>
from io import StringIO
from antlr4.Token import Token
from antlr4.error.Errors import IllegalStateException
# this is just to keep meaningful parameter types to Parser
class TokenStream(object):
    """Marker base class; see BufferedTokenStream for the implementation."""
    pass
class BufferedTokenStream(TokenStream):
    def __init__(self, tokenSource):
        """Create a stream buffering tokens pulled lazily from tokenSource."""
        # The {@link TokenSource} from which tokens for this stream are fetched.
        self.tokenSource = tokenSource
        # A collection of all tokens fetched from the token source. The list is
        # considered a complete view of the input once {@link #fetchedEOF} is set
        # to {@code true}.
        self.tokens = []
        # The index into {@link #tokens} of the current token (next token to
        # {@link #consume}). {@link #tokens}{@code [}{@link #p}{@code ]} should be
        # {@link #LT LT(1)}.
        #
        # <p>This field is set to -1 when the stream is first constructed or when
        # {@link #setTokenSource} is called, indicating that the first token has
        # not yet been fetched from the token source. For additional information,
        # see the documentation of {@link IntStream} for a description of
        # Initializing Methods.</p>
        self.index = -1
        # Indicates whether the {@link Token#EOF} token has been fetched from
        # {@link #tokenSource} and added to {@link #tokens}. This field improves
        # performance for the following cases:
        #
        # <ul>
        # <li>{@link #consume}: The lookahead check in {@link #consume} to prevent
        # consuming the EOF symbol is optimized by checking the values of
        # {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.</li>
        # <li>{@link #fetch}: The check to prevent adding multiple EOF symbols into
        # {@link #tokens} is trivial with this field.</li>
        # <ul>
        self.fetchedEOF = False
    def mark(self):
        # Buffered streams keep every token, so marks need no bookkeeping.
        return 0
    def release(self, marker):
        """Release a mark; a no-op since mark() tracks nothing."""
        # no resources to release
        pass
    def reset(self):
        # Rewind to the first token.
        self.seek(0)
    def seek(self, index):
        """Move the current position to `index` (subclasses may adjust it)."""
        self.lazyInit()
        self.index = self.adjustSeekIndex(index)
    def get(self, index):
        """Return the token at absolute buffer position `index`."""
        self.lazyInit()
        return self.tokens[index]
    def consume(self):
        """Advance past the current token; raises when already at EOF.

        The skipEofCheck fast path avoids an LA(1) call (and therefore a
        possible fetch) when the current index is known to be a buffered,
        non-final token.
        """
        skipEofCheck = False
        if self.index >= 0:
            if self.fetchedEOF:
                # the last token in tokens is EOF. skip check if p indexes any
                # fetched token except the last.
                skipEofCheck = self.index < len(self.tokens) - 1
            else:
                # no EOF token in tokens. skip check if p indexes a fetched token.
                skipEofCheck = self.index < len(self.tokens)
        else:
            # not yet initialized
            skipEofCheck = False
        if not skipEofCheck and self.LA(1) == Token.EOF:
            raise IllegalStateException("cannot consume EOF")
        if self.sync(self.index + 1):
            self.index = self.adjustSeekIndex(self.index + 1)
# Make sure index {@code i} in tokens has a token.
#
# @return {@code true} if a token is located at index {@code i}, otherwise
# {@code false}.
# @see #get(int i)
#/
    def sync(self, i):
        """Ensure index *i* is buffered; return True when a token exists there."""
        assert i >= 0
        n = i - len(self.tokens) + 1 # how many more elements we need?
        if n > 0 :
            fetched = self.fetch(n)
            # fetch may return fewer than n when EOF was reached.
            return fetched >= n
        return True
# Add {@code n} elements to buffer.
#
# @return The actual number of elements added to the buffer.
#/
    def fetch(self, n):
        """Pull up to *n* tokens from the source; return how many were added.

        Once EOF has been buffered, fetchedEOF guards against adding it
        again and fetch short-circuits to 0.
        """
        if self.fetchedEOF:
            return 0
        for i in range(0, n):
            t = self.tokenSource.nextToken()
            # Record the token's position in the buffer.
            t.tokenIndex = len(self.tokens)
            self.tokens.append(t)
            if t.type==Token.EOF:
                self.fetchedEOF = True
                return i + 1
        return n
# Get all tokens from start..stop inclusively#/
def getTokens(self, start, stop, types=None):
if start<0 or stop<0:
return None
self.lazyInit()
subset = []
if stop >= len(self.tokens):
stop = len(self.tokens)-1
for i in range(start, stop):
t = self.tokens[i]
if t.type==Token.EOF:
break
if types is None or t.type in types:
subset.append(t)
return subset
    def LA(self, i):
        """Return the token type of lookahead token LT(i)."""
        return self.LT(i).type
    def LB(self, k):
        """Return the token *k* positions behind the current index, or None."""
        if (self.index-k) < 0:
            return None
        return self.tokens[self.index-k]
    def LT(self, k):
        """Return the k-th lookahead token (k < 0 looks backwards via LB).

        LT(1) is the current token; LT(0) is defined as None.
        """
        self.lazyInit()
        if k==0:
            return None
        if k < 0:
            return self.LB(-k)
        i = self.index + k - 1
        self.sync(i)
        if i >= len(self.tokens): # return EOF token
            # EOF must be last token
            return self.tokens[len(self.tokens)-1]
        return self.tokens[i]
# Allowed derived classes to modify the behavior of operations which change
# the current stream position by adjusting the target toke | n index of a seek
# operation. The default implementation simply returns {@code i}. If an
# exception is thrown in this method, the current stream index should not be
# changed.
#
# <p>For example, {@link CommonTokenStream} overrides this method to ensure that
# the seek target is always an on-channel token.</p>
#
# @param i The target token index.
# @return The adjusted target token index.
    def adjustSeekIndex(self, i):
        # Default implementation: the target index is used unchanged.
        # Subclasses (e.g. CommonTokenStream) override this to land on an
        # on-channel token, per the comment block above.
        return i
    def lazyInit(self):
        # index == -1 means no token has been fetched yet (see __init__).
        if self.index == -1:
            self.setup()
    def setup(self):
        # Fetch the first token and position the stream on it.
        self.sync(0)
        self.index = self.adjustSeekIndex(0)
# Reset this token stream by setting its token source.#/
def setTokenSource(self, tokenSource):
self.tokenSource = tokenSource
self.tokens = []
self.index = -1
# Given a starting index, return the index of the next token on channel.
# Return i if tokens[i] is on channel. Return -1 if there are no tokens
# on channel between i and EOF.
#/
def nextTokenOnChannel( |
stefwalter/cockpituous | tests/doc/inkscape-export.py | Python | lgpl-2.1 | 1,516 | 0.005277 | #!/usr/bin/env python
import os
import codecs
from xml.dom import minidom
import subprocess
import sys
INKSCAPE = '/usr/bin/inkscape'
def list_layers(svg):
    """Return the inkscape:label of every layer group in the SVG DOM.

    Python 2 code: relies on the minidom NamedNodeMap has_key() method.
    """
    layers = [ ]
    for g in svg.getElementsByTagName("g"):
        if g.attributes.has_key("inkscape:label"):
            layers.append(g.attributes["inkscape:label"].value)
    return layers
def export_layer(svg, directory, layer, stay):
    """Export one layer of *svg* to <directory>/<layer>.png via Inkscape.

    All layers except *layer* and the always-visible *stay* layers are
    hidden, the modified SVG is written to a temporary file, rendered to
    PNG with Inkscape, and the temporary file is removed.  Layers listed
    in *stay* are never exported on their own.
    """
    if layer in stay:
        return
    print layer, "..."
    for g in svg.getElementsByTagName("g"):
        if g.attributes.has_key("inkscape:label"):
            label = g.attributes["inkscape:label"].value
            if label == layer or label in stay:
                g.attributes['style'] = 'display:inline'
            else:
                g.attributes['style'] = 'display:none'
    dest = os.path.join(directory, layer + ".svg")
    codecs.open(dest, "w", encoding="utf8").write(svg.toxml())
    png = os.path.join(directory, layer + ".png")
    subprocess.check_call([INKSCAPE, "--export-png", png, dest])
    os.unlink(dest)
def main():
    """Parse command-line arguments and export every layer of the SVG."""
    from argparse import ArgumentParser
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--stay', action='append', default=[], help='layer to always have visible')
    parser.add_argument('src', help='source SVG file.')
    args = parser.parse_args()
    svg = minidom.parse(open(args.src))
    # PNGs are written next to the source file.
    for layer in list_layers(svg):
        export_layer(svg, os.path.dirname(args.src), layer, args.stay)
if _ | _name__ == '__main__':
main()
|
iamyangchen/xbmc-addons-chinese | plugin.video.pptv/default.py | Python | gpl-2.0 | 30,009 | 0.039897 | # -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcplugin, xbmcaddon, urllib2, urllib, re, gzip, datetime, StringIO, urlparse
try:
import json
except:
import simplejson as json
import ChineseKeyboard
# Plugin constants
__addonname__ = "PPTV视频"
__addonid__ = "plugin.video.pptv"
__addon__ = xbmcaddon.Addon(id=__addonid__)
UserAgent_IPAD = 'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5'
UserAgent_IE = 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)'
PPTV_LIST = 'http://list.pptv.com/'
PPTV_WEBPLAY_XML = 'http://web-play.pptv.com/'
PPTV_TV_LIST = 'http://live.pptv.com/list/tv_list'
PPTV_META_JSON = 'http://svcdn.pptv.com/show/v2/meta.json'
PPTV_PLAYLIST_JSON = 'http://svcdn.pptv.com/show/v2/playlist.json'
FLVCD_PARSER_PHP = 'http://www.flvcd.com/parse.php'
FLVCD_DIY_URL = 'http://www.flvcd.com/diy/diy00'
PPTV_SEARCH_URL = 'http://search.pptv.com/s_video/q_'
PPTV_TV_AREA_URL = 'http://live.pptv.com/api/tv_list?area_id='
PPTV_SUBJECT_LIST = 'http://live.pptv.com/api/subject_list?'
PPTV_CURRENT = '当前'
PPTV_SORT = '排序:'
PPTV_TTH = '第'
PPTV_FIELD = '节'
PPTV_PAGE = '页'
PPTV_SELECT = '按此选择'
PPTV_FIRST_PAGE = '第一页'
PPTV_LAST_PAGE = '最后一页'
PPTV_PREV_PAGE = '上一页'
PPTV_NEXT_PAGE = '下一页'
PPTV_MSG_GET_URL_FAILED = '无法获取视频地址!'
PPTV_MSG_INVALID_URL = '无效的视频地址, 可能不是PPTV视频!'
PPTV_MSG_NO_VIP = '暂时无法观看PPTV VIP视频!'
PPTV_SEARCH = '按此进行搜索...'
PPTV_SEARCH_DESC = '请输入搜索内容'
PPTV_SEARCH_RES = '搜索结果'
# PPTV video qualities
PPTV_VIDEO_NORMAL = 0
PPTV_VIDEO_HD = 1
PPTV_VIDEO_FHD = 2
PPTV_VIDEO_BLUER = 3
# PPTV video quality values
# Note: Blue ray video is currently only available to VIP users, so pity
PPTV_VIDEO_QUALITY_VALS = ('normal', 'high', 'super', '')
PPTV_EM_QUALITY_VALS = ('收费', '超清', '蓝光', 'VIP', '登录', '独家', '首播', '最新', '直播')
PPTV_TV_AREAS = 35
PPTV_LIVE_TYPES = { 'http://live.pptv.com/list/sports_program/' : '35', 'http://live.pptv.com/list/game_program/' : '5', 'http://live.pptv.com/list/finance/' : '47' }
##### Common functions #####
dbg = False
dbglevel = 3
def GetHttpData(url, agent = UserAgent_IPAD):
#print "getHttpData: " + url
req = urllib2.Request(url)
req.add_header('Accept-encoding', 'gzip')
req.add_header('User-Agent', agent)
try:
response = urllib2.urlopen(req)
httpdata = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
try:
tmpdata = gzip.GzipFile(fileobj = StringIO.StringIO(httpdata)).read()
httpdata = tmpdata
except:
print "Invalid gzip content on: " + url
charset = response.headers.getparam('charset')
response.close()
except:
print 'GetHttpData Error: %s' % url
return ''
match = re.compile('<meta http-equiv=["]?[Cc]ontent-[Tt]ype["]? content="text/html;[\s]?charset=(.+?)"').findall(httpdata)
if len(match)>0:
charset = match[0]
if charset:
charset = charset.lower()
if (charset != 'utf-8') and (charset != 'utf8'):
httpdata = httpdata.decode(charset, 'ignore').encode('utf8', 'ignore')
return httpdata
def _getDOMContent(html, name, match, ret): # Cleanup
    """Extract the inner content of the tag opened by *match* in *html*.

    The while loop pushes *end* past close tags that belong to nested
    <name> elements occurring before it.  With a truthy *ret* the opening
    and closing tags are included in the result.
    """
    log("match: " + match, 3)
    endstr = u"</" + name # + ">"
    start = html.find(match)
    end = html.find(endstr, start)
    pos = html.find("<" + name, start + 1 )
    log(str(start) + " < " + str(end) + ", pos = " + str(pos) + ", endpos: " + str(end), 8)
    while pos < end and pos != -1: # Ignore too early </endstr> return
        tend = html.find(endstr, end + len(endstr))
        if tend != -1:
            end = tend
        pos = html.find("<" + name, pos + 1)
        log("loop: " + str(start) + " < " + str(end) + " pos = " + str(pos), 8)
    log("start: %s, len: %s, end: %s" % (start, len(match), end), 3)
    if start == -1 and end == -1:
        result = u""
    elif start > -1 and end > -1:
        result = html[start + len(match):end]
    elif end > -1:
        result = html[:end]
    elif start > -1:
        result = html[start + len(match):]
    if ret:
        endstr = html[end:html.find(">", html.find(endstr)) + 1]
        result = match + result + endstr
    log("done result length: " + str(len(result)), 3)
    return result
def _getDOMAttributes(match, name, ret):
    """Collect values of attribute *ret* from <name ...> tags in *match*.

    Handles both quoted and unquoted attribute values; returns [''] when
    nothing matched so the result list is never empty.
    """
    log("", 3)
    lst = re.compile('<' + name + '.*?' + ret + '=(.[^>]*?)>', re.M | re.S).findall(match)
    # ret is reused as the result list from here on.
    ret = []
    for tmp in lst:
        cont_char = tmp[0]
        if cont_char in "'\"":
            log("Using %s as quotation mark" % cont_char, 3)
            # Limit down to next variable.
            if tmp.find('=' + cont_char, tmp.find(cont_char, 1)) > -1:
                tmp = tmp[:tmp.find('=' + cont_char, tmp.find(cont_char, 1))]
            # Limit to the last quotation mark
            if tmp.rfind(cont_char, 1) > -1:
                tmp = tmp[1:tmp.rfind(cont_char)]
        else:
            log("No quotation mark found", 3)
            # Unquoted value: cut at the first delimiter found.
            if tmp.find(" ") > 0:
                tmp = tmp[:tmp.find(" ")]
            elif tmp.find("/") > 0:
                tmp = tmp[:tmp.find("/")]
            elif tmp.find(">") > 0:
                tmp = tmp[:tmp.find(">")]
        ret.append(tmp.strip())
    log("Done: " + repr(ret), 3)
    if len(ret) <= 0:
        ret.append('')
    return ret
def _getDOMElements(item, name, attrs):
    """Return opening <name ...> tags in *item* matching every attr filter.

    Each key of *attrs* narrows the candidate list: matches missing from
    a later key's list are purged (an intersection by repeated deletion).
    With empty attrs it falls back to matching on the tag name alone.
    """
    log("", 3)
    lst = []
    for key in attrs:
        lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=[\'"]' + attrs[key] + '[\'"].*?>))', re.M | re.S).findall(item)
        if len(lst2) == 0 and attrs[key].find(" ") == -1: # Try matching without quotation marks
            lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=' + attrs[key] + '.*?>))', re.M | re.S).findall(item)
        if len(lst) == 0:
            log("Setting main list " + repr(lst2), 5)
            lst = lst2
            lst2 = []
        else:
            log("Setting new list " + repr(lst2), 5)
            # Iterate in reverse so deletions do not shift pending indexes.
            test = range(len(lst))
            test.reverse()
            for i in test: # Delete anything missing from the next list.
                if not lst[i] in lst2:
                    log("Purging mismatch " + str(len(lst)) + " - " + repr(lst[i]), 3)
                    del(lst[i])
    if len(lst) == 0 and attrs == {}:
        log("No list found, trying to match on name only", 3)
        lst = re.compile('(<' + name + '>)', re.M | re.S).findall(item)
        if len(lst) == 0:
            lst = re.compile('(<' + name + ' .*?>)', re.M | re.S).findall(item)
    log("Done: " + str(type(lst)), 3)
    return lst
def parseDOM(html, name=u"", attrs={}, ret=False):
    """Tiny regex-based 'DOM' query helper.

    :param html: str/unicode document, or a list of documents
    :param name: tag name to search for (required)
    :param attrs: attribute-name -> value (regex fragment) filters
    :param ret: False to return element contents, or an attribute name to
        return that attribute's values instead
    :returns: list of matches, or u"" on invalid input
    NOTE(review): the attrs={} mutable default is only safe because attrs
    is never mutated here.
    """
    log("Name: " + repr(name) + " - Attrs:" + repr(attrs) + " - Ret: " + repr(ret) + " - HTML: " + str(type(html)), 3)
    if isinstance(html, str): # Should be handled
        html = [html]
    elif isinstance(html, unicode):
        html = [html]
    elif not isinstance(html, list):
        log("Input isn't list or string/unicode.")
        return u""
    if not name.strip():
        log("Missing tag name")
        return u""
    ret_lst = []
    for item in html:
        # Collapse newlines inside tags so the single-line regexes match.
        temp_item = re.compile('(<[^>]*?\n[^>]*?>)').findall(item)
        for match in temp_item:
            item = item.replace(match, match.replace("\n", " "))
        lst = _getDOMElements(item, name, attrs)
        if isinstance(ret, str):
            log("Getting attribute %s content for %s matches " % (ret, len(lst) ), 3)
            lst2 = []
            for match in lst:
                lst2 += _getDOMAttributes(match, name, ret)
            lst = lst2
        else:
            log("Getting element content for %s matches " % len(lst), 3)
            lst2 = []
            for match in lst:
                log("Getting element content for %s" % match, 4)
                temp = _getDOMContent(item, name, match, ret).strip()
                # Advance past the consumed content so repeated tags are
                # resolved in document order.
                item = item[item.find(temp, item.find(match)) + len(temp):]
                lst2.append(temp)
            lst = lst2
        ret_lst += lst
    log("Done: " + repr(ret_lst), 3)
    return ret_lst
def log(description, level=0):
    # Debug tracing: printed only when the module-level dbg flag is on and
    # the message level is below dbglevel (Python 2 print statement).
    if dbg and dbglevel > level:
        print description
##### Common functions end #####
def GetPPTVCatalogs():
    """Scrape the live-TV and VOD category menus from pptv.com.

    :returns: list of {'link': <url, utf-8 str>, 'name': <label, utf-8 str>}
    """
    cat_list = []
    links = []
    names = []
    # Live TV channel list.
    data = GetHttpData(PPTV_TV_LIST)
    chl = CheckValidList(parseDOM(unicode(data, 'utf-8', 'ignore'), 'li', attrs = { 'class' : 'level_1 ' }))
    if len(chl) > 0:
        links = parseDOM(chl, 'a', ret = 'href')
        names = parseDOM(chl, 'a')
    # VOD category menu.
    data = GetHttpData(PPTV_LIST)
    chl = CheckValidList(parseDOM(unicode(data, 'utf-8', 'ignore'), 'div', attrs = { 'class' : 'detail_menu' }))
    if len(chl) > 0:
        links.extend(parseDOM(chl, 'a', ret = 'href'))
        names.extend(parseDOM(chl, 'a'))
    cat_list.extend([{ 'link' : re.sub('\.pptv\.com\?', '.pptv.com/?', i.encode('utf-8')), 'name' : j.encode('utf-8') } for i, j in zip(links, names)])
    return cat_list
def CheckJSLink(link):
    """Return *link* unless it is a javascript: pseudo-URL or falsy; else ''."""
    if link[:11] == 'javascript:':
        return ''
    return link or ''
def CheckValidList(val):
return (len(val) |
xuwei0455/hedge | bin/ctaBackTesting.py | Python | mit | 32,917 | 0.000522 | # -*- coding: utf-8 -*-
"""
本文件中包含的是CTA模块的回测引擎,回测引擎的API和CTA引擎一致,
可以使用和实盘相同的代码进行回测。
"""
from __future__ import division
from collections import OrderedDict
from datetime import datetime, timedelta
from itertools import product
import pymongo
from vnpy.engine.cta.ctaBase import CtaTickData, StopOrder
fro | m vnpy.engine.cta.ctaConstant import *
from vnpy.utils.vtConstant import *
from vnpy.utils.vtFunction import loadMongoSetting
from vnpy.utils.vtGateway import VtOrderData, VtTradeData
########################################################################
class BackTestingEngine(object):
| """
CTA回测引擎
函数接口和策略引擎保持一样,
从而实现同一套代码从回测到实盘。
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
# ----------------------------------------------------------------------
    def __init__(self):
        """Constructor: initialise all back-testing state with defaults."""
        # Counter for locally generated stop-order IDs.
        self.stopOrderCount = 0
        # stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        # Local stop-order books, keyed by stopOrderID (values: StopOrder).
        self.stopOrderDict = {}             # cancelled stop orders are NOT removed from this dict
        self.workingStopOrderDict = {}      # cancelled stop orders ARE removed from this dict
        # Engine type marker: this engine is the back-tester.
        self.engineType = ENGINETYPE_BACKTESTING
        # Back-testing configuration.
        self.strategy = None        # strategy under test
        self.mode = self.BAR_MODE   # replay mode, bar-level by default
        self.slippage = 0           # assumed slippage per trade
        self.rate = 0               # assumed commission rate (for percentage commission)
        self.size = 1               # contract multiplier, defaults to 1
        self.dbClient = None        # database client
        self.dbCursor = None        # database cursor
        # self.historyData = []       # list of history data (kept for reference)
        self.initData = []          # warm-up data used for strategy initialisation
        # self.backtestingData = []   # replay data (kept for reference)
        self.dbName = ''            # back-test database name
        self.symbol = ''            # back-test collection (symbol) name
        self.dataStartDate = None       # first date of the data window (datetime)
        self.dataEndDate = None         # last date of the data window (datetime)
        self.strategyStartDate = None   # date trading starts, earlier data is warm-up (datetime)
        self.limitOrderDict = OrderedDict()         # all limit orders
        self.workingLimitOrderDict = OrderedDict()  # active limit orders, used for matching
        self.limitOrderCount = 0                    # limit-order ID counter
        self.tradeCount = 0             # trade ID counter
        self.tradeDict = OrderedDict()  # all trades
        self.logList = []               # log records
        # Latest market data, used to simulate fills.
        self.tick = None
        self.bar = None
        self.dt = None      # latest datetime seen
# ----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
"""设置回测的启动日期"""
self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
initTimeDelta = timedelta(initDays)
self.strategyStartDate = self.dataStartDate + initTimeDelta
# ----------------------------------------------------------------------
def setEndDate(self, endDate=''):
"""设置回测的结束日期"""
if endDate:
self.dataEndDate = datetime.strptime(endDate, '%Y%m%d')
# ----------------------------------------------------------------------
    def setBackTestingMode(self, mode):
        """Set the replay mode (TICK_MODE or BAR_MODE)."""
        self.mode = mode
# ----------------------------------------------------------------------
    def setDatabase(self, dbName, symbol):
        """Set the MongoDB database and collection the history is read from."""
        self.dbName = dbName
        self.symbol = symbol
# ----------------------------------------------------------------------
def loadHistoryData(self):
"""载入历史数据"""
host, port = loadMongoSetting()
self.dbClient = pymongo.MongoClient(host, port)
collection = self.dbClient[self.dbName][self.symbol]
self.output(u'开始载入数据')
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
# 载入初始化需要用的数据
flt = {'datetime': {'$gte': self.dataStartDate,
'$lt': self.strategyStartDate}}
initCursor = collection.find(flt)
# 将数据从查询指针中读取出,并生成列表
for d in initCursor:
data = dataClass()
data.__dict__ = d
self.initData.append(data)
# 载入回测数据
if not self.dataEndDate:
flt = {'datetime': {'$gte': self.strategyStartDate}} # 数据过滤条件
else:
flt = {'datetime': {'$gte': self.strategyStartDate,
'$lte': self.dataEndDate}}
self.dbCursor = collection.find(flt)
self.output(u'载入完成,数据量:%s' % (initCursor.count() + self.dbCursor.count()))
# ----------------------------------------------------------------------
    def runBackTesting(self):
        """Run the back-test: load history, start the strategy, replay data."""
        # Load the history data first.
        self.loadHistoryData()
        # Choose the record class and push callback for the replay mode.
        # NOTE(review): CtaBarData is not in this module's visible imports;
        # confirm it is imported elsewhere.
        if self.mode == self.BAR_MODE:
            dataClass = CtaBarData
            func = self.newBar
        else:
            dataClass = CtaTickData
            func = self.newTick
        self.output(u'开始回测')
        self.strategy.inited = True
        self.strategy.onInit()
        self.output(u'策略初始化完成')
        self.strategy.trading = True
        self.strategy.onStart()
        self.output(u'策略启动完成')
        self.output(u'开始回放数据')
        # Rehydrate each stored document and feed it through the engine.
        for d in self.dbCursor:
            data = dataClass()
            data.__dict__ = d
            func(data)
        self.output(u'数据回放结束')
# ----------------------------------------------------------------------
    def newBar(self, bar):
        """Process a new bar: match pending orders, then push it to the strategy."""
        self.bar = bar
        self.dt = bar.datetime
        self.crossLimitOrder()      # match limit orders first
        self.crossStopOrder()       # then match stop orders
        self.strategy.onBar(bar)    # push the bar to the strategy
# ----------------------------------------------------------------------
    def newTick(self, tick):
        """Process a new tick: match pending orders, then push it to the strategy."""
        self.tick = tick
        self.dt = tick.datetime
        self.crossLimitOrder()      # match limit orders first
        self.crossStopOrder()       # then match stop orders
        self.strategy.onTick(tick)  # push the tick to the strategy
# ----------------------------------------------------------------------
    def initStrategy(self, strategyClass, setting=None):
        """Instantiate the strategy under test.

        *setting* carries the strategy parameters; pass None to use the
        defaults declared on the strategy class.
        """
        self.strategy = strategyClass(self, setting)
        self.strategy.name = self.strategy.className
# ----------------------------------------------------------------------
    def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
        """Create a simulated limit order, register it for matching, return its ID."""
        self.limitOrderCount += 1
        orderID = str(self.limitOrderCount)
        order = VtOrderData()
        order.vtSymbol = vtSymbol
        order.price = price
        order.totalVolume = volume
        order.status = STATUS_NOTTRADED     # just submitted, nothing filled yet
        order.orderID = orderID
        order.vtOrderID = orderID
        order.orderTime = str(self.dt)
        # Map the CTA order type onto direction/offset.
        if orderType == CTAORDER_BUY:
            order.direction = DIRECTION_LONG
            order.offset = OFFSET_OPEN
        elif orderType == CTAORDER_SELL:
            order.direction = DIRECTION_SHORT
            order.offset = OFFSET_CLOSE
        elif orderType == CTAORDER_SHORT:
            order.direction = DIRECTION_SHORT
            order.offset = OFFSET_OPEN
        elif orderType == CTAORDER_COVER:
            order.direction = DIRECTION_LONG
            order.offset = OFFSET_CLOSE
        # Keep the order in both the full book and the working book.
        self.workingLimitOrderDict[orderID] = order
        self.limitOrderDict[orderID] = order
        return orderID
# ----------------------------------------------------------------------
    def cancelOrder(self, vtOrderID):
        """Cancel a working limit order; unknown IDs are ignored silently."""
        if vtOrderID in self.workingLimitOrderDict:
            order = self.workingLimitOrderDict[vtOrderID]
            order.status = STATUS_CANCELLED
            order.cancelTime = str(self.dt)
            del self.workingLimitOrderDict[vtOrderID]
# ----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.price = price
so.volume = volume
|
armagetronad-xtw/0.4-armagetronad-xtw | batch/make/sortresources.py | Python | gpl-2.0 | 618 | 0.006472 | #!/usr/bin/python
import sys, os
foundModule = False
# Directory containing this script; the search walks upward from here
# until the armabuild package becomes importable.
newPathSearch = os.path.dirname(os.path.abspath(sys.argv[0]))
maxnumAttempts = 10
numAttempts = 0
while not foundModule:
    try:
        import armabuild
        import armabuild.resource
        foundModule = True
        print("Found armabuild!")
    except ImportError:
        # Walk one directory level up and retry.  The original left
        # newPathSearch unchanged, so every retry probed the same parent
        # directory instead of ascending the tree; it also used a bare
        # except and compared against a literal 9 instead of
        # maxnumAttempts.
        newPathSearch = os.path.dirname(newPathSearch)
        sys.path[0] = newPathSearch
        numAttempts += 1
        if numAttempts >= maxnumAttempts:
            print("Unable to find armabuild module. Can't continue.")
            sys.exit(1)

if __name__ == "__main__":
    armabuild.resource.main(sys.argv[1:])
CooperLuan/devops.notes | taobao/top/api/rest/WlbItemBatchQueryRequest.py | Python | mit | 390 | 0.030769 | '''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class WlbItemBatchQueryRequest(RestApi):
    """Generated request wrapper for the taobao.wlb.item.batch.query TOP API.

    The attributes set in __init__ are the request parameters; callers
    fill them in before issuing the request through RestApi.
    """
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters (populated by the caller before sending).
        self.item_ids = None
        self.page_no = None
        self.page_size = None
        self.store_code = None
    def getapiname(self):
        # API method name used by the RestApi base when building the call.
        return 'taobao.wlb.item.batch.query'
|
HiSPARC/station-software | user/python/Lib/test/test_codecs.py | Python | gpl-3.0 | 81,185 | 0.001552 | from test import test_support
import unittest
import codecs
import locale
import sys, StringIO
def coding_checker(self, coder):
    """Build a closure asserting ``coder(input) == (expect, len(input))``.

    *self* is the TestCase whose assertEqual does the reporting.
    """
    def check(input, expect):
        actual = coder(input)
        self.assertEqual(actual, (expect, len(input)))
    return check
class Queue(object):
    """FIFO character queue: write at one end, read from the other."""

    def __init__(self):
        self._buffer = ""

    def write(self, chars):
        # Simple append; buffers in these tests stay tiny.
        self._buffer += chars

    def read(self, size=-1):
        """Return up to *size* buffered characters; all of them if size < 0."""
        if size < 0:
            drained, self._buffer = self._buffer, ""
            return drained
        head, self._buffer = self._buffer[:size], self._buffer[size:]
        return head
class ReadTest(unittest.TestCase):
    def check_partial(self, input, partialresults):
        """Feed *input* to the codec one byte at a time and check each stage.

        After every byte, the decoded text so far must equal the matching
        entry of *partialresults*; the same sequence is verified for the
        StreamReader, the incremental decoder (before and after reset),
        and finally iterdecode() must round-trip the whole encoding.
        """
        # get a StreamReader for the encoding and feed the bytestring version
        # of input to the reader byte by byte. Read everything available from
        # the StreamReader and check that the results equal the appropriate
        # entries from partialresults.
        q = Queue()
        r = codecs.getreader(self.encoding)(q)
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            q.write(c)
            result += r.read()
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(r.read(), u"")
        self.assertEqual(r.bytebuffer, "")
        self.assertEqual(r.charbuffer, u"")
        # do the check again, this time using an incremental decoder
        d = codecs.getincrementaldecoder(self.encoding)()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")
        # Check whether the reset method works properly
        d.reset()
        result = u""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(c)
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode("", True), u"")
        self.assertEqual(d.buffer, "")
        # check iterdecode()
        encoded = input.encode(self.encoding)
        self.assertEqual(
            input,
            u"".join(codecs.iterdecode(encoded, self.encoding))
        )
    def test_readline(self):
        """readline() must honour size/keepends across all line-end styles."""
        def getreader(input):
            # Wrap the encoded text in a StreamReader for this encoding.
            stream = StringIO.StringIO(input.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)
        def readalllines(input, keepends=True, size=None):
            # Read every line and join with '|' so results compare easily.
            reader = getreader(input)
            lines = []
            while True:
                line = reader.readline(size=size, keepends=keepends)
                if not line:
                    break
                lines.append(line)
            return "|".join(lines)
        s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
        sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
        sexpectednoends = u"foo|bar|baz|spam|eggs"
        self.assertEqual(readalllines(s, True), sexpected)
        self.assertEqual(readalllines(s, False), sexpectednoends)
        self.assertEqual(readalllines(s, True, 10), sexpected)
        self.assertEqual(readalllines(s, False, 10), sexpectednoends)
        lineends = ("\n", "\r\n", "\r", u"\u2028")
        # Test long lines (multiple calls to read() in readline())
        vw = []
        vwo = []
        for (i, lineend) in enumerate(lineends):
            vw.append((i*200+200)*u"\u3042" + lineend)
            vwo.append((i*200+200)*u"\u3042")
        self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
        self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
        # Test lines where the first read might end with \r, so the
        # reader has to look ahead whether this is a lone \r or a \r\n
        for size in xrange(80):
            for lineend in lineends:
                # Each repetition contributes two lines: size*"a"+lineend
                # and "xxx\n".
                s = 10*(size*u"a" + lineend + u"xxx\n")
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=True),
                        size*u"a" + lineend,
                    )
                    self.assertEqual(
                        reader.readline(keepends=True),
                        "xxx\n",
                    )
                reader = getreader(s)
                for i in xrange(10):
                    self.assertEqual(
                        reader.readline(keepends=False),
                        size*u"a",
                    )
                    self.assertEqual(
                        reader.readline(keepends=False),
                        "xxx",
                    )
    def test_mixed_readline_and_read(self):
        """Interleaving readline()/read()/readlines() must not lose data.

        Each section reproduces a historical buffering bug (bpo issue
        numbers in the comments below).
        """
        lines = ["Humpty Dumpty sat on a wall,\n",
                 "Humpty Dumpty had a great fall.\r\n",
                 "All the king's horses and all the king's men\r",
                 "Couldn't put Humpty together again."]
        data = ''.join(lines)
        def getreader():
            stream = StringIO.StringIO(data.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)
        # Issue #8260: Test readline() followed by read()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.read(), ''.join(lines[1:]))
        self.assertEqual(f.read(), '')
        # Issue #32110: Test readline() followed by read(n)
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.read(1), lines[1][0])
        self.assertEqual(f.read(0), '')
        self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
        # Issue #16636: Test readline() followed by readlines()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.readlines(), lines[1:])
        self.assertEqual(f.read(), '')
        # Test read(n) followed by read()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.read(), data[5:])
        self.assertEqual(f.read(), '')
        # Issue #32110: Test read(n) followed by read(n)
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.read(1), data[5])
        self.assertEqual(f.read(0), '')
        self.assertEqual(f.read(100), data[6:106])
        # Issue #12446: Test read(n) followed by readlines()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
        self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' |
bitonic/troppotardi | troppotardi/lib/thumbnailer.py | Python | gpl-2.0 | 2,794 | 0.003937 | from PIL import Image
import os
from pylons import config
def resize_image(filename, max_width=None, max_height=None, crop=False):
    """Open *filename* from the configured images_dir and scale it down.

    Images already within (max_width, max_height) are returned untouched.
    With crop=True (and both bounds given) the image is scaled to cover
    the target box and then centre-cropped to exactly that box; otherwise
    it is scaled to fit inside the given bound(s), preserving the aspect
    ratio.  Nothing is written to disk here; a PIL Image is returned.

    NOTE(review): the pixel arithmetic (e.g. ``(thumb_width -
    max_width) / 2``) assumes Python 2 integer division -- confirm before
    porting to Python 3.  Calling with both bounds None on a large image
    would pass None to im.thumbnail(); presumably callers always supply
    at least one bound.
    """
    im = Image.open(os.path.join(config['images_dir'], filename))
    (width, height) = im.size
    # If the image is smaller then the max size, we don't touch it
    if not (width <= max_width and height <= max_height):
        if crop and max_width and max_height:
            # Scale to cover the box, then centre-crop (sx, up, dx, low).
            if width / float(height) >= max_width / float(max_height):
                thumb_height = max_height
                thumb_width = width * max_height / height
                sx = (thumb_width - max_width) / 2
                up = 0
                dx = sx + max_width
                low = max_height
            else:
                thumb_width = max_width
                thumb_height = max_width * height / width
                sx = 0
                up = (thumb_height - max_height) /2
                dx = max_width
                low = max_height + up
            im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
            im = im.crop((sx, up, dx, low))
        else:
            # Fit inside the bounds: pick whichever dimension constrains.
            if max_height:
                new_width = width * max_height / height
            if max_width:
                new_height = height * max_width / width
            if max_height and max_width:
                if new_width > max_width:
                    max_height = None
                else:
                    max_width = None
            # Calculate the size...
            if max_width and not max_height:
                max_height = new_height
            elif max_height and not max_width:
                max_width = new_width
            im.thumbnail((max_width, max_height), Image.ANTIALIAS)
    return im
def thumbnailer(filename, max_width=None, max_height=None, crop=False, save=True):
    """Given the filename of an image in the image dir,
    it thumbnails it and stores it in the thumbnails directory
    (both directories are defined in the ini file).
    It then returns the url of the thumbnail.

    NOTE(review): the *save* parameter is accepted but never used in this
    body -- the thumbnail is always written when missing; confirm intent.
    """
    # Encode the target geometry (and crop flag) into the cached filename.
    name, ext = os.path.splitext(filename)
    name = name + '_' + str(max_width) + 'x' + str(max_height)
    if crop:
        name += '_crop_'
    name += ext
    # If the thumbnail already exists, don't create it.
    # This could be dangerous if the image could be changed, but
    # the image cannot be changed right now, so it should be safe.
    if not os.path.isfile(os.path.join(config['thumbs_dir'], name)):
        im = resize_image(filename, max_width, max_height, crop)
        im.save(os.path.join(config['thumbs_dir'], name))
    return os.path.join(config['thumbs_base_url'], name)
|
dNG-git/pas_media | src/dNG/data/media/abstract_audio.py | Python | gpl-2.0 | 1,708 | 0.000585 | # -*- coding: | utf-8 -*-
"""
direct PAS
Pyt | hon Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;media
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(pasMediaVersion)#
#echo(__FILEPATH__)#
"""
from .abstract import Abstract
class AbstractAudio(Abstract):
    """
Implementation independent audio class. Concrete audio handling is
provided by subclasses; this base adds no members of its own.

:author: direct Netware Group et al.
:copyright: direct Netware Group - All rights reserved
:package: pas
:subpackage: media
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;gpl
GNU General Public License 2
    """
    pass
#
|
cwells/franz | lib/pygments.py | Python | mit | 1,729 | 0.002895 | import importlib
from pygments.lexer import RegexLexer, bygroups
from pygments.styles import STYLE_MAP
from pygments.token import *
def load_style(full_class_string):
    """Resolve a 'module::ClassName' spec to a Pygments style class."""
    module_name, class_name = full_class_string.split('::')
    style_module = importlib.import_module("pygments.styles." + module_name)
    return getattr(style_module, class_name)
# Build a name -> style-class map for every style Pygments ships, blanking
# each style's Whitespace rule because some styles underline whitespace.
repl_styles = {}
for name, import_info in STYLE_MAP.items():
    repl_styles[name] = load_style(import_info)
    repl_styles[name].styles[Whitespace] = '' # some styles underline ws
class FranzLexer(RegexLexer):
    """Pygments lexer for the Franz language, defined as regex token rules."""
    name = 'Franz Lexer'
    tokens = {
        'root': [
            # Opening quote switches to the dedicated string state below.
            (r'"', String.Double, 'double-quote'),
            (r'[0-9]+(\.[0-9]+)?', Number),
            (r'\b(if|else|for|while|in|to|fn|ⲗ|try|rescue|assert|include|yield|return|break|continue)\b', Keyword.Reserved),
            (r'\b(int|str|any|float|list|dict|bool)\b', Keyword.Type),
            (r'\b(and|or|not)\b', Operator.Word),
            (r'#.*?$', Comment.Single),
            # "name = fn" introduces a function definition.
            (r'([a-zA-Z][a-zA-Z0-9_!?\-%$]*)(\s*)(=)(\s*)(fn)',
                bygroups(Name.Function.Definition, Whitespace, Operator, Whitespace, Keyword.Reserved)),
            # "name(" is a function call site.
            (r'\b([a-zA-Z][a-zA-Z0-9_!?\-%$]*)(\s*)([(])', bygroups(Name.Function, Whitespace, Punctuation)),
            (r'\b[a-zA-Z][a-zA-Z0-9_!?\-%$]*\b', Name),
            (r'\s+([*+\-^=<>%/?]+)\s+', Operator),
            (r'[@().,:;\[\]]', Punctuation),
            (r'[{}]', Punctuation.Braces),
            (r'\s+', Whitespace)
        ],
        'double-quote': [
            # Interpolation, escapes, plain text, then the closing quote.
            (r'\{.*?\}', String.Interpol),
            (r'\\.', Literal.String.Escape),
            (r'[^"{}\\]+', String.Double),
            (r'"', String.Double, '#pop'),
        ]
    }
beekpr/wsgiservice | wsgiservice/resource.py | Python | bsd-2-clause | 58,294 | 0.001269 | import hashlib
import inspect
import json
import logging
import re
from xml.sax.saxutils import escape as xml_escape
import webob
from wsgiservice import xmlserializer
from wsgiservice.decorators import mount
from wsgiservice.exceptions import (MultiValidationException, ResponseException,
ValidationException)
from wsgiservice.status import *
logger = logging.getLogger(__name__)
class Resource(object):
    """Base class for all WsgiService resources. A resource is a unique REST
    endpoint which accepts different methods for different actions.
    For each HTTP call the corresponding method (equal to the HTTP method)
    will be called.
    """
    # All class-level constants below are meant to be overridden by
    # subclasses to customise behaviour.
    #: The root tag for generated XML output. Used by :func:`to_text_xml`.
    #: (Default: 'response')
    XML_ROOT_TAG = 'response'
    #: List of the known HTTP methods. Used by :func:`get_method` to handle
    #: methods that are not implemented. (Default: All methods defined by the
    #: HTTP 1.1 standard :rfc:`2616`)
    KNOWN_METHODS = ['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE',
                     'TRACE', 'CONNECT']
    #: List of tuples mapping file extensions to MIME types. The first item of
    #: the tuple is the extension and the second is the associated MIME type.
    #: Used by :func:`get_content_type` to determine the requested MIME type.
    #: (Default: '.xml' and '.json').
    EXTENSION_MAP = [
        ('.xml', 'text/xml'),
        ('.json', 'application/json'),
    ]
    #: A tuple of exceptions that should be treated as 404. An ideal candidate
    #: is KeyError if you do dictionary accesses. Used by :func:`call` which
    #: calls :func:`handle_exception_404` whenever an exception from this
    #: tuple occurs. (Default: Empty tuple)
    NOT_FOUND = ()
    #: A tuple of absolute paths that should return a 404. By default this is
    #: used to ignored requests for favicon.ico and robots.txt so that
    #: browsers don't cause too many exceptions.
    IGNORED_PATHS = ('/favicon.ico', '/robots.txt')
    #: Whether the input parameters from GET and POST should be decoded
    #: according to the encoding specified by the request. This should only be
    #: changed to False if the input is supposed to be byte values. (Default:
    #: True)
    DECODE_PARAMS = True
    #: Object representing the current request. Set by the constructor.
    request = None
    #: Object representing the current response. Set by the constructor.
    response = None
    #: Dictionary with the path parameters. Set by the constructor.
    path_params = None
    #: String with the current path. Same as request.path except the extension
    #: is removed. So instead of `/movies.json` it is just `/movies`. Set by
    #: the constructor.
    request_path = None
    #: Reference to the application. Set by the constructor.
    application = None
    #: Charset to output in the Content-Type headers. Set to None to avoid
    #: sending this.
    charset = 'UTF-8'
    # Cache for the `data` property
    _data = None
    def __init__(self, request, response, path_params, application=None):
        """Constructor. Order of the parameters is not guaranteed, always
        use named parameters.
        :param request: Object representing the current request.
        :type request: :class:`webob.Request`
        :param response: Object representing the response to be sent.
        :type response: :class:`webob.Response`
        :param path_params: Dictionary of all parameters passed in via the
                            path. This is the return value of
                            :func:`Router.__call__`.
        :type path_params: dict
        :param application: Reference to the application which is calling this
                            resource. Can be used to reference other resources
                            or properties of the application itself.
        :type application: :class:`wsgiservice.Application`
        """
        self.request = request
        self.response = response
        self.path_params = path_params
        self.application = application
        self.request_path = ''
        if request:
            self.request_path = request.path
        if path_params and path_params.get('_extension'):
            ext = path_params['_extension']
            if self.request_path.endswith(ext):
                # Strip the format extension (e.g. '.json') so that
                # /movies.json and /movies resolve to the same path.
                self.request_path = self.request_path[0:-len(ext)]
    def OPTIONS(self):
        """Default implementation of the OPTIONS verb. Outputs a list of
        allowed methods on this resource in the ``Allow`` response header.
        """
        self.response.headers['Allow'] = self.get_allowed_methods()
    def __call__(self):
        """Main entry point for calling this resource. Handles the method
        dispatching, response conversion, etc. for this resource.
        Catches all exceptions:
        - :class:`webob.exceptions.ResponseException`: Replaces the
          instance's response attribute with the one from the exception.
        - For all exceptions in the :attr:`NOT_FOUND` tuple
          :func:`handle_exception_404` is called.
        - :class:`wsgiservice.exceptions.ValidationException`:
          :func:`handle_exception_400` is called.
        - For all other exceptions deriving from the :class:`Exception`
          base class, the :func:`handle_exception` method is called.
        """
        self.type = self.get_content_type()
        try:
            self.method = self.get_method()
            self.handle_ignored_resources()
            self.assert_conditions()
            self.response.body_raw = self.call_method(self.method)
        except ResponseException, e:
            # a response was raised, catch it
            self.response = e.response
            r = e.response
            # An empty 404 response body is filled in by the 404 handler.
            if r.status_int == 404 and not r.body and not hasattr(r, 'body_raw'):
                self.handle_exception_404(e)
        except self.NOT_FOUND, e:
            self.handle_exception_404(e)
        except ValidationException, e:
            self.handle_exception_400(e)
        except Exception, e:
            self.handle_exception(e)
        # Serialize body_raw into the requested MIME type and finalize headers.
        self.convert_response()
        self.set_response_headers()
        return self.response
    @property
    def data(self):
        """Returns the request data as a dictionary.
        Merges the path parameters, GET parameters and POST parameters
        (form-encoded or JSON dictionary). If a key is present in multiple of
        these, the first one defined is used.
        """
        # Cached after the first access; the request data does not change
        # during the lifetime of a resource instance.
        if self._data:
            return self._data
        retval = {}
        data = self.get_request_data()
        for subdata in data:
            for key, value in subdata.iteritems():
                # First definition wins: earlier sources take precedence.
                if not key in retval:
                    retval[key] = value
        self._data = retval
        return retval
    def get_resource(self, resource, **kwargs):
        """Returns a new instance of the resource class passed in as resource.
        This is a helper to make future-compatibility easier when new
        arguments get added to the constructor.
        :param resource: Resource class to instantiate. Gets called with the
                         named arguments as required for the constructor.
        :type resource: :class:`Resource`
        :param kwargs: Additional named arguments to pass to the constructor
                       function.
        :type kwargs: dict
        """
        return resource(request=self.request, response=self.response,
            path_params=self.path_params, application=self.application,
            **kwargs)
def get_method(self, method=None):
"""Returns the method to call on this instance as a string. Raises a
HTTP exception if no method can be found. Aborts with a 405 status
code for known methods (based on the :attr:`KNOWN_METHODS` list) and a
501 status code for all other methods.
:param method: Name of the method to return. Must be all-uppercase.
:type method: str
:raises: :class:`webob.exceptions.ResponseException` of status 405 or
|
phaethon/scapy | kamene/layers/rtp.py | Python | gpl-2.0 | 1,435 | 0.008362 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
RTP (Real-time Transport Protocol).
"""
from kamene.packet import *
from kamene.fields import *
# Mapping of RTP payload type numbers to codec names, taken from the IANA
# registry of RTP parameters.
_rtp_payload_types = {
    # http://www.iana.org/assignments/rtp-parameters
    0: 'G.711 PCMU', 3: 'GSM',
    4: 'G723', 5: 'DVI4',
    6: 'DVI4', 7: 'LPC',
    8: 'PCMA', 9: 'G722',
    10: 'L16', 11: 'L16',
    12: 'QCELP', 13: 'CN',
    14: 'MPA', 15: 'G728',
    16: 'DVI4', 17: 'DVI4',
    18: 'G729', 25: 'CelB',
    26: 'JPEG', 28: 'nv',
    31: 'H261', 32: 'MPV',
    33: 'MP2T', 34: 'H263' }
class RTP(Packet):
    # Fixed RTP header layout (cf. RFC 3550 section 5.1): version/padding/
    # extension/CSRC-count bits, marker + payload type, then sequence,
    # timestamp, SSRC and the variable-length CSRC list.
    name="RTP"
    fields_desc = [ BitField('version', 2, 2),
                    BitField('padding', 0, 1),
                    BitField('extension', 0, 1),
                    # 'numsync' is derived automatically from len(sync).
                    BitFieldLenField('numsync', None, 4, count_of='sync'),
                    BitField('marker', 0, 1),
                    BitEnumField('payloadtype', 0, 7, _rtp_payload_types),
                    ShortField('sequence', 0),
                    IntField('timestamp', 0),
                    IntField('sourcesync', 0),
                    FieldListField('sync', [], IntField("id",0), count_from=lambda pkt:pkt.numsync) ]
|
Dangetsu/vnr | Frameworks/Sakura/py/apps/reader/views/topicview.py | Python | gpl-3.0 | 12,896 | 0.01768 | # coding: utf8
# topicview.py
# 1/2/2015 jichi
if __name__ == '__main__':
import sys
sys.path.append('..')
import debug
debug.initenv()
import json
from functools import partial
from PySide.QtCore import Qt, QObject
from Qt5 import QtWidgets
from sakurakit import skevents, skqss
from sakurakit.skclass import Q_Q, memoized, memoizedproperty
from sakurakit.skdebug import dprint, dwarn
from sakurakit.sktr import tr_
from sakurakit.skwebkit import SkWebView #, SkWebViewBean
from sakurakit.skwidgets import SkTitlelessDockWidget, SkStyleView
#from sakurakit.skqml import QmlObject
from mytr import mytr_
import comets, config, dataman, netman, osutil, rc
@Q_Q
class _TopicView(object):
  """Private implementation of the topic view window (Q_Q pattern: ``q``
  is the public Qt widget this object backs).

  Renders a single forum topic in a web view and keeps it live through a
  comet (long-polling) channel.
  """
  def __init__(self, q):
    self.topicId = 0 # long
    self.topicComet = None  # lazily created when a topic is selected
    self._createUi(q)
    #shortcut('ctrl+n', self._new, parent=q)
  def _createUi(self, q):
    # Web view in the center, inspector toolbar docked at the bottom.
    q.setCentralWidget(self.webView)
    dock = SkTitlelessDockWidget(self.inspector)
    dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
    #dock.setAllowedAreas(Qt.BottomDockWidgetArea)
    q.addDockWidget(Qt.BottomDockWidgetArea, dock)
  def clear(self):
    """Deselect the current topic and deactivate its comet channel."""
    self.setTopicId(0)
  def setTopicId(self, topicId): # long ->
    """Switch to a different topic, (de)activating the comet channel."""
    if self.topicId != topicId:
      self.topicId = topicId
      if not topicId:
        if self.topicComet:
          self.topicComet.setActive(False)
      else:
        if not self.topicComet:
          # First use: create the comet and hook up its signals once.
          self.topicComet = comets.createPostComet()
          qml = self.topicComet.q
          #qml.topicDataReceived.connect(self._onTopicReceived)
          qml.topicDataUpdated.connect(self._onTopicUpdated)
          qml.postDataUpdated.connect(self._onPostUpdated)
          qml.postDataReceived.connect(self._onPostReceived)
        path = 'topic/%s' % topicId
        self.topicComet.setPath(path)
        if netman.manager().isOnline():
          self.topicComet.setActive(True)
def | _injectBeans(self):
h = self.webView.page().mainFrame()
#h.addToJavaScriptWindowObject('bean', self._webBean)
for name,obj in self._beans:
h.addToJavaScriptWindowObject(name, obj)
@memoizedproperty
def _beans(self):
"""
return [(unicode name, QObject bean)]
"""
import coffeebean
m = coffeebean.manager()
return (
('cacheBean', m.cacheBean),
('i18nBean', m.i18nBean),
('mainBean', m.mainBean),
('topicEditBean', self.topicEditBean),
#('topicInputBean', self.topicInputBean),
('postEditBean', self.postEditBean),
('postInputBean', self.postInputBean),
)
@memoizedproperty
def postEditBean(self):
import postedit
return postedit.PostEditorManagerBean(parent=self.q, manager=self.postEditorManager)
@memoizedproperty
def postInputBean(self):
import postinput
return postinput.PostInputManagerBean(parent=self.q, manager=self.postInputManager)
@memoizedproperty
def topicEditBean(self):
import topicedit
return topicedit.TopicEditorManagerBean(parent=self.q, manager=self.topicEditorManager)
#@memoizedproperty
#def topicInputBean(self):
# import topicinput
# return topicinput.TopicInputManagerBean(parent=self.q, manager=self.topicInputManager)
@memoizedproperty
def postEditorManager(self):
import postedit
ret = postedit.PostEditorManager(self.q)
ret.postChanged.connect(self._updatePost)
return ret
@memoizedproperty
def postInputManager(self):
import postinput
ret = postinput.PostInputManager(self.q)
ret.postReceived.connect(self._submitPost)
return ret
@memoizedproperty
def topicEditorManager(self):
import topicedit
ret = topicedit.TopicEditorManager(self.q)
ret.topicChanged.connect(self._updateTopic)
return ret
#@memoizedproperty
#def topicInputManager(self):
# import topicinput
# ret = topicinput.TopicInputManager(self.q)
# ret.topicReceived.connect(self._submitTopic)
# return ret
  def _submitPost(self, postData, imageData):
    """Submit a new post to the current topic asynchronously."""
    if self.topicId and netman.manager().isOnline():
      import forumapi
      skevents.runlater(partial(forumapi.manager().submitPost,
          postData, imageData,
          topicId=self.topicId))
  def _updatePost(self, postData, imageData):
    """Push an edited post to the server asynchronously."""
    if self.topicId and netman.manager().isOnline():
      import forumapi
      skevents.runlater(partial(forumapi.manager().updatePost,
          postData, imageData))
  #def _submitTopic(self, topicData, imageData, ticketData):
  #  subjectId = self.subjectId
  #  if subjectId:
  #    subjectType = 'game'
  #  else:
  #    subjectId = config.GLOBAL_SUBJECT_ID
  #    subjectType = 'subject'
  #  if netman.manager().isOnline():
  #    import forumapi
  #    skevents.runlater(partial(forumapi.manager().submitTopic,
  #        topicData, imageData, ticketData,
  #        subjectId=subjectId, subjectType=subjectType))
  def _updateTopic(self, topicData, imageData, ticketData):
    """Push an edited topic to the server asynchronously."""
    if netman.manager().isOnline():
      import forumapi
      skevents.runlater(partial(forumapi.manager().updateTopic,
          topicData, imageData, ticketData))
  # Comet callbacks: *data* is a JSON string; only apply it when it belongs
  # to the currently displayed topic and the window is visible.
  def _onPostReceived(self, data): # str ->
    try:
      obj = json.loads(data)
      topicId = obj['topicId']
      if topicId == self.topicId and self.q.isVisible():
        self.addPost(data)
        dprint("pass")
    except Exception, e:
      dwarn(e)
  def _onPostUpdated(self, data): # str ->
    try:
      obj = json.loads(data)
      topicId = obj['topicId']
      if topicId == self.topicId and self.q.isVisible():
        self.updatePost(data)
        dprint("pass")
    except Exception, e:
      dwarn(e)
  #def _onTopicReceived(self, data): # str ->
  #  try:
  #    obj = json.loads(data)
  #    subjectId = obj['subjectId']
  #    if subjectId == self.subjectId and self.q.isVisible():
  #      self.addTopic(data)
  #      dprint("pass")
  #  except Exception, e:
  #    dwarn(e)
  def _onTopicUpdated(self, data): # str ->
    try:
      obj = json.loads(data)
      topicId = obj['id']
      if topicId == self.topicId and self.q.isVisible():
        self.updateTopic(data)
        dprint("pass")
    except Exception, e:
      dwarn(e)
  @memoizedproperty
  def webView(self):
    """Lazily created web view that renders the topic page."""
    from PySide.QtWebKit import QWebPage
    ret = SkWebView()
    ret.titleChanged.connect(self.q.setWindowTitle)
    ret.enableHighlight() # highlight selected text
    ret.ignoreSslErrors() # needed to access Twitter
    ret.pageAction(QWebPage.Reload).triggered.connect(
        self.refresh, Qt.QueuedConnection)
    ret.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks) # Since there are local images
    ret.page().mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff) # disable horizontal scroll
    #ret.page().setLinkDelegationPolicy(QWebPage.DelegateExternalLinks)
    ret.linkClicked.connect(osutil.open_url)
    return ret
  def refresh(self):
    """@reimp

    Re-render the topic page from the HAML template and re-inject the
    JavaScript beans (setHtml discards previously injected objects).
    """
    self.newPostButton.setVisible(bool(self.topicId))
    #self.gameButton.setVisible(bool(self.subjectId))
    host = config.API_HOST # must be the same as rest.coffee for the same origin policy
    user = dataman.manager().user()
    w = self.webView
    w.setHtml(rc.haml_template('haml/reader/topicview').render({
      'host': host,
      'locale': config.language2htmllocale(user.language),
      'title': tr_("Topic"),
      'topicId': self.topicId,
      'userName': user.name if not user.isGuest() else '',
      'userPassword': user.password,
      'rc': rc,
      'tr': tr_,
    }), host)
    self._injectBeans()
  @memoizedproperty
  def inspector(self):
    """Bottom toolbar holding the action buttons."""
    ret = SkStyleView()
    skqss.class_(ret, 'texture')
    layout = QtWidgets.QHBoxLayout()
    layout.addWidget(self.newPostButton)
    #layout.addWidget(self.newTopicButton)
    layout.addStretch()
    layout.addWidget(self.browseButton)
    #layout.addWidget(self.gameButton)
    layout.addWidget(self.refreshButton)
    ret.setLayout(layout)
    layout.setContentsMargins(4, 4, 4, 4)
    return ret
@memoizedproperty
def refreshButton(self):
ret = QtWidgets.QPushButton(tr_("Refresh"))
skqss.class_(ret, 'btn btn-primary')
ret.setToolTip(tr_("Refresh") + " (Ctrl+R)")
#ret.setStatusTip(ret.toolTip())
r |
repology/repology | repology/fetchers/fetchers/rsync.py | Python | gpl-3.0 | 2,001 | 0.001 | # Copyright (C) 2017-2020 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import os
from repology.fetchers import Fetcher
from repology.logger import Logger, NoopLogger
from repology.subprocess import run_subprocess
class RsyncFetcher(Fetcher):
    """Fetcher that mirrors a remote rsync URL into a local state directory."""

    def __init__(self, url: str, fetch_timeout: int = 60, rsync_include: str | None = None, rsync_exclude: str | None = None) -> None:
        self.url = url
        self.fetch_timeout = fetch_timeout
        self.rsync_include = rsync_include
        self.rsync_exclude = rsync_exclude

    def fetch(self, statepath: str, update: bool = True, logger: Logger = NoopLogger()) -> bool:
        """Run rsync from self.url into statepath.

        Returns True when a transfer was performed, False when an existing
        state was kept because no update was requested.
        """
        if not update and os.path.exists(statepath):
            logger.log('no update requested, skipping')
            return False
        # Base options: archive mode with compression, pruning files that
        # disappeared upstream, and skipping unsafe symlinks.
        options = [
            '--info=stats2',
            '--archive',
            '--compress',
            '--delete',
            '--delete-excluded',
            '--safe-links',
        ]
        if self.fetch_timeout is not None:
            options.extend(['--timeout', str(self.fetch_timeout)])
        if self.rsync_include is not None:
            options.extend(['--include', self.rsync_include])
        if self.rsync_exclude is not None:
            options.extend(['--exclude', self.rsync_exclude])
        run_subprocess(['rsync'] + options + [self.url, statepath], logger)
        return True
|
xjw1001001/IGCexpansion | setup.py | Python | gpl-3.0 | 347 | 0.040346 | fr | om distutils.core import setup
setup(name = 'IGCexpansion',
version = '0.2-dev',
author = 'Xiang Ji',
url = 'https://github.com/xji3/IGCexpansion',
#download_url = 'https://github.com/xji3/Genconv/tree/master/IGCexpansion/',
packages = ['IGCexpansion',],
#long_description = open('README.md').read()
| )
|
hlt-mt/tensorflow | tensorflow/models/rnn/fbk_nmt/seq2seq_model.py | Python | apache-2.0 | 13,619 | 0.004038 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
from tensorflow.models.rnn.translate import data_utils
class Seq2SeqModel(object):
  """Sequence-to-sequence model with attention and for multiple buckets.
  This class implements a multi-layer recurrent neural network as encoder,
  and an attention-based decoder. This is the same as the model described in
  this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
  or into the seq2seq library for complete model implementation.
  This class also allows to use GRU cells in addition to LSTM cells, and
  sampled softmax to handle large output vocabulary size. A single-layer
  version of this model, but with bi-directional encoder, was presented in
  http://arxiv.org/abs/1409.0473
  and sampled softmax is described in Section 3 of the following paper.
  http://arxiv.org/pdf/1412.2007v2.pdf
  """
  def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
               num_layers, max_gradient_norm, batch_size, learning_rate,
               learning_rate_decay_factor, use_lstm=False,
               num_samples=512, forward_only=False):
    """Create the model.
    Args:
      source_vocab_size: size of the source vocabulary.
      target_vocab_size: size of the target vocabulary.
      buckets: a list of pairs (I, O), where I specifies maximum input length
        that will be processed in that bucket, and O specifies maximum output
        length. Training instances that have inputs longer than I or outputs
        longer than O will be pushed to the next bucket and padded accordingly.
        We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
      size: number of units in each layer of the model.
      num_layers: number of layers in the model.
      max_gradient_norm: gradients will be clipped to maximally this norm.
      batch_size: the size of the batches used during training;
        the model construction is independent of batch_size, so it can be
        changed after initialization if this is convenient, e.g., for decoding.
      learning_rate: learning rate to start with.
      learning_rate_decay_factor: decay learning rate by this much when needed.
      use_lstm: if true, we use LSTM cells instead of GRU cells.
      num_samples: number of samples for sampled softmax.
      forward_only: if set, we do not construct the backward pass in the model.
    """
    self.source_vocab_size = source_vocab_size
    self.target_vocab_size = target_vocab_size
    self.buckets = buckets
    self.batch_size = batch_size
    # The learning rate lives in a variable so it can be decayed during
    # training by running learning_rate_decay_op.
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)
    # If we use sampled softmax, we need an output projection.
    output_projection = None
    softmax_loss_function = None
    # Sampled softmax only makes sense if we sample less than vocabulary size.
    if num_samples > 0 and num_samples < self.target_vocab_size:
      with tf.device("/cpu:0"):
        w = tf.get_variable("proj_w", [size, self.target_vocab_size])
        w_t = tf.transpose(w)
        b = tf.get_variable("proj_b", [self.target_vocab_size])
      output_projection = (w, b)
      def sampled_loss(inputs, labels):
        # Loss over a sampled subset of the vocabulary (pinned to CPU).
        with tf.device("/cpu:0"):
          labels = tf.reshape(labels, [-1, 1])
          return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
                                            self.target_vocab_size)
      softmax_loss_function = sampled_loss
    # Create the internal multi-layer cell for our RNN.
    single_cell = rnn_cell.GRUCell(size)
    if use_lstm:
      single_cell = rnn_cell.BasicLSTMCell(size)
    cell = single_cell
    if num_layers > 1:
      cell = rnn_cell.MultiRNNCell([single_cell] * num_layers)
    # The seq2seq function: we use embedding for the input and attention.
    def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
      return seq2seq.embedding_attention_seq2seq(
          encoder_inputs, decoder_inputs, cell, source_vocab_size,
          target_vocab_size, output_projection=output_projection,
          feed_previous=do_decode)
    # Feeds for inputs.
    self.encoder_inputs = []
    self.decoder_inputs = []
    self.target_weights = []
    for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
      self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                                name="encoder{0}".format(i)))
    for i in xrange(buckets[-1][1] + 1):
      self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                                name="decoder{0}".format(i)))
      self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
                                                name="weight{0}".format(i)))
    # Our targets are decoder inputs shifted by one.
    targets = [self.decoder_inputs[i + 1]
               for i in xrange(len(self.decoder_inputs) - 1)]
    # Training outputs and losses.
    if forward_only:
      self.outputs, self.losses = seq2seq.model_with_buckets(
          self.encoder_inputs, self.decoder_inputs, targets,
          self.target_weights, buckets, self.target_vocab_size,
          lambda x, y: seq2seq_f(x, y, True),
          softmax_loss_function=softmax_loss_function)
      # If we use output projection, we need to project outputs for decoding.
      if output_projection is not None:
        for b in xrange(len(buckets)):
          self.outputs[b] = [tf.nn.xw_plus_b(output, output_projection[0],
                                             output_projection[1])
                             for output in self.outputs[b]]
    else:
      self.outputs, self.losses = seq2seq.model_with_buckets(
          self.encoder_inputs, self.decoder_inputs, targets,
          self.target_weights, buckets, self.target_vocab_size,
          lambda x, y: seq2seq_f(x, y, False),
          softmax_loss_function=softmax_loss_function)
    # Gradients and SGD update operation for training the model.
    params = tf.trainable_variables()
    if not forward_only:
      self.gradient_norms = []
      self.updates = []
      opt = tf.train.GradientDescentOptimizer(self.learning_rate)
      for b in xrange(len(buckets)):
        gradients = tf.gradients(self.losses[b], params)
        clipped_gradients, norm = tf.clip_by_global_norm(gradients,
                                                         max_gradient_norm)
        self.gradient_norms.append(norm)
        self.updates.append(opt.apply_gradients(
            zip(clipped_gradients, params), global_step=self.global_step))
    self.saver = tf.train.Saver(tf.all_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
|
tuna/gdanmaku | gdanmaku/danmaku.py | Python | gpl-3.0 | 3,347 | 0.000598 | #!/usr/bin/env python2
# -*- coding:utf-8 -*-
import signal
import json
import argparse
import threading
import requests
from settings import load_config
from app import GDanmakuApp
from server_selection import ServerSelectionWindow
from danmaku_ui import Danmaku
from gi.repository import Gtk, GLib, GObject
class Main(object):
    """Top-level application controller.

    Wires the server-selection window to a background thread that
    long-polls the danmaku HTTP stream, and shows each received comment
    as a ``Danmaku`` window on the GTK main loop.
    """

    def __init__(self, server=None):
        """Build the UI and initial state.

        :param server: optional stream server URL preselected in the
            server-selection window.
        """
        self.server = server
        server_selection = ServerSelectionWindow(self.server)
        server_selection.connect('server-selected', self.on_server_selected)
        self.app = GDanmakuApp(self)
        self.thread_sub = None   # background subscription thread
        self.enabled = True      # when False, incoming danmakus are dropped
        self.options = load_config()
        self.live_danmakus = {}  # id(window) -> window, for later cleanup

    def _subscribe_danmaku(self, server, channel, password):
        """Long-poll the HTTP stream of *channel* forever (thread target).

        Each successful poll yields a JSON list of danmaku option dicts
        which is handed to :meth:`new_danmaku` via the GTK main loop.
        """
        print("subscribing from server: {}, channel: {}".format(server, channel))
        uri = self.options["http_stream_uri"].format(cname=channel)
        if uri.startswith("/") and server.endswith("/"):
            server = server[:-1]
        url = server + uri
        while True:
            try:
                # No timeout on purpose: the server holds the request open
                # until new danmakus arrive (comet-style long polling).
                res = requests.get(
                    url, headers={"X-GDANMAKU-AUTH-KEY": password})
            except requests.exceptions.ConnectionError:
                continue
            if res.status_code == 200 and res.text:
                try:
                    dm_opts = json.loads(res.text)
                except ValueError:
                    # Malformed JSON payload from the server; skip it.
                    continue
                else:
                    # Hop back onto the GTK main loop before touching UI.
                    GLib.idle_add(self.new_danmaku, dm_opts)

    def new_danmaku(self, dm_opts):
        """Create a Danmaku window for every option dict in *dm_opts*."""
        if not self.enabled:
            return
        for opt in dm_opts:
            try:
                dm = Danmaku(**opt)
                dm.connect('delete-event', self.on_danmaku_delete)
            except Exception as e:
                # One bad danmaku must not kill the whole batch.
                print(e)
                continue
            self.live_danmakus[id(dm)] = dm

    def on_danmaku_delete(self, dm, event):
        """Forget a danmaku window once it closes itself."""
        self.live_danmakus.pop(id(dm))

    def toggle_danmaku(self):
        """Toggle display; when disabling, tear down all live danmakus."""
        self.enabled = not self.enabled
        if not self.enabled:
            # Snapshot the values: _clean_exit()/delete-event handlers pop
            # entries from live_danmakus, which would otherwise mutate the
            # dict while it is being iterated.
            for dm in list(self.live_danmakus.values()):
                dm.hide()
                dm._clean_exit()

    def on_server_selected(self, widget, server, channel, password):
        """Start the daemon subscription thread for the chosen channel."""
        thread_sub = threading.Thread(
            target=self._subscribe_danmaku, args=(server, channel, password))
        thread_sub.daemon = True
        thread_sub.start()
        self.thread_sub = thread_sub

    def run(self):
        """Enter the GTK main loop."""
        GObject.threads_init()
        Gtk.main()
def app_config():
    """Show the standalone configuration window and block until it closes."""
    from config_panel import ConfigPanel
    from gi.repository import Gtk
    ConfigPanel()
    Gtk.main()
def main():
    """Entry point: parse CLI arguments, then run the config UI or the app."""
    options = load_config()
    parser = argparse.ArgumentParser(prog="gdanmaku")
    parser.add_argument(
        "--server",
        type=str,
        default=options["http_stream_server"],
        help="danmaku stream server"
    )
    parser.add_argument(
        '--config',
        action="store_true",
        help="run configuration window"
    )
    args = parser.parse_args()
    # Restore default signal handling so Ctrl-C/TERM terminate the GTK loop.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    if args.config:
        app_config()
    else:
        main_app = Main(args.server)
        main_app.run()
if __name__ == '__main__':
    main()
# vim: ts=4 sw=4 sts=4 expandtab
|
bgris/ODL_bgris | lib/python3.5/_pyio.py | Python | gpl-3.0 | 87,968 | 0.000739 | """
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
import array
import stat
import sys
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
if sys.platform in {'win32', 'cygwin'}:
from msvcrt import setmode as _setmode
else:
_setmode = None
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
# Whence values accepted by seek(); SEEK_HOLE/SEEK_DATA are OS-dependent
# extensions, added only when the running platform exposes them.
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE') :
    valid_seek_flags.add(os.SEEK_HOLE)
    valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise OSError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
raises an `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
'U' mode is deprecated and will raise an exception in future versions
of Python. It has no effect in Python 3. Use newline to control
universal newlines mode.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
The newly created file is non-inheritable.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns | a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a Buffered | Random.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
|
joopert/home-assistant | homeassistant/components/mobile_app/helpers.py | Python | apache-2.0 | 5,246 | 0.000762 | """Helpers for mobile_app."""
import json
import logging
from typing import Callable, Dict, Tuple
from aiohttp.web import Response, json_response
from nacl.encoding import Base64Encoder
from nacl.secret import SecretBox
from homeassistant.core import Context
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ATTR_APP_DATA,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_APP_VERSION,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SUPPORTS_ENCRYPTION,
CONF_SECRET,
CONF_USER_ID,
DATA_BINARY_SENSOR,
DATA_DELETED_IDS,
DATA_SENSOR,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
def setup_decrypt() -> Tuple[int, Callable]:
    """Return a (key length, decrypt function) pair.

    Async friendly.
    """
    key_size = SecretBox.KEY_SIZE

    def decrypt(ciphertext, key):
        """Decrypt base64-encoded ciphertext with the given secret key."""
        box = SecretBox(key)
        return box.decrypt(ciphertext, encoder=Base64Encoder)

    return key_size, decrypt
def setup_encrypt() -> Tuple[int, Callable]:
    """Return a (key length, encrypt function) pair.

    Async friendly.
    """

    def encrypt(plaintext, key):
        """Encrypt plaintext with key, returning base64-encoded ciphertext."""
        # Fix: the argument was previously named "ciphertext", which was
        # misleading -- this function receives the cleartext payload.
        # Callers invoke it positionally, so the rename is safe.
        return SecretBox(key).encrypt(plaintext, encoder=Base64Encoder)

    return (SecretBox.KEY_SIZE, encrypt)
def _decrypt_payload(key: str, ciphertext: str) -> Dict[str, str]:
    """Decrypt an encrypted webhook payload and parse it as JSON.

    Returns the decoded dict, or None when decryption is impossible
    (libsodium missing, no key configured) or fails.
    """
    try:
        keylen, decrypt = setup_decrypt()
    except OSError:
        # PyNaCl raises OSError when the libsodium shared library can't load.
        _LOGGER.warning("Ignoring encrypted payload because libsodium not installed")
        return None

    if key is None:
        _LOGGER.warning("Ignoring encrypted payload because no decryption key known")
        return None

    # Normalize the shared secret to exactly the box's key size:
    # truncate if too long, NUL-pad if too short.
    key = key.encode("utf-8")
    key = key[:keylen]
    key = key.ljust(keylen, b"\0")

    try:
        message = decrypt(ciphertext, key)
        message = json.loads(message.decode("utf-8"))
        _LOGGER.debug("Successfully decrypted mobile_app payload")
        return message
    except ValueError:
        # Covers both a failed MAC check / decrypt and invalid JSON.
        _LOGGER.warning("Ignoring encrypted payload because unable to decrypt")
        return None
def registration_context(registration: Dict) -> Context:
    """Build a Context attributed to the registration's owning user."""
    return Context(user_id=registration[CONF_USER_ID])
def empty_okay_response(headers: Dict = None, status: int = 200) -> Response:
    """Return a Response whose body is an empty JSON object.

    The status defaults to 200 but may be overridden by the caller.
    """
    return Response(
        text="{}", status=status, content_type="application/json", headers=headers
    )
def error_response(
    code: str, message: str, status: int = 400, headers: dict = None
) -> Response:
    """Return a JSON error Response carrying the given code and message."""
    payload = {
        "success": False,
        "error": {"code": code, "message": message},
    }
    return json_response(payload, status=status, headers=headers)
def supports_encryption() -> bool:
    """Test if we support encryption.

    Returns False both when the PyNaCl package is not installed
    (ImportError) and when the underlying libsodium shared library
    fails to load (OSError). Previously only OSError was caught, so a
    missing package crashed callers instead of reporting "unsupported".
    """
    try:
        import nacl  # noqa: F401 pylint: disable=unused-import

        return True
    except (ImportError, OSError):
        return False
def safe_registration(registration: Dict) -> Dict:
    """Return a copy of a registration without sensitive values."""
    # Sensitive values deliberately excluded: webhook_id, secret, cloudhook_url
    safe_keys = (
        ATTR_APP_DATA,
        ATTR_APP_ID,
        ATTR_APP_NAME,
        ATTR_APP_VERSION,
        ATTR_DEVICE_NAME,
        ATTR_MANUFACTURER,
        ATTR_MODEL,
        ATTR_OS_VERSION,
        ATTR_SUPPORTS_ENCRYPTION,
    )
    return {key: registration[key] for key in safe_keys}
def savable_state(hass: HomeAssistantType) -> Dict:
    """Return a clean object containing things that should be saved.

    Copies only the persistable slices of hass.data[DOMAIN]; other keys
    (e.g. live connections) are intentionally left out.
    """
    return {
        DATA_BINARY_SENSOR: hass.data[DOMAIN][DATA_BINARY_SENSOR],
        DATA_DELETED_IDS: hass.data[DOMAIN][DATA_DELETED_IDS],
        DATA_SENSOR: hass.data[DOMAIN][DATA_SENSOR],
    }
def webhook_response(
    data, *, registration: Dict, status: int = 200, headers: Dict = None
) -> Response:
    """Return an encrypted response if registration supports it."""
    data = json.dumps(data, cls=JSONEncoder)

    if registration[ATTR_SUPPORTS_ENCRYPTION]:
        keylen, encrypt = setup_encrypt()
        # Normalize the shared secret to the box's key size
        # (truncate, then NUL-pad) -- mirrors _decrypt_payload.
        key = registration[CONF_SECRET].encode("utf-8")
        key = key[:keylen]
        key = key.ljust(keylen, b"\0")

        enc_data = encrypt(data.encode("utf-8"), key).decode("utf-8")
        # Wrap the ciphertext in the envelope the mobile client expects.
        data = json.dumps({"encrypted": True, "encrypted_data": enc_data})

    return Response(
        text=data, status=status, content_type="application/json", headers=headers
    )
def device_info(registration: Dict) -> Dict:
    """Return the device registry info for this registration."""
    return dict(
        identifiers={(DOMAIN, registration[ATTR_DEVICE_ID])},
        manufacturer=registration[ATTR_MANUFACTURER],
        model=registration[ATTR_MODEL],
        device_name=registration[ATTR_DEVICE_NAME],
        sw_version=registration[ATTR_OS_VERSION],
    )
|
syscoin/syscoin2 | test/util/syscoin-util-test.py | Python | mit | 6,594 | 0.003185 | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for syscoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
    """Parse options, configure logging and run the utility test suite.

    Fix: the config file was previously opened with a bare open() and
    never closed; it is now closed via a context manager.
    """
    # Read the build configuration (source dir, build dir, exe suffix, ...).
    config = configparser.ConfigParser()
    config.optionxform = str
    config_path = os.path.join(os.path.dirname(__file__), "../config.ini")
    with open(config_path, encoding="utf8") as config_file:
        config.read_file(config_file)
    env_conf = dict(config.items('environment'))

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()

    # Verbose shows every test result; otherwise only errors are printed.
    level = logging.DEBUG if args.verbose else logging.ERROR
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        level=level)

    bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"),
             "syscoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
    """Load and parse the input file, run all tests and report results.

    Exits the interpreter with status 1 if any test case failed, 0 otherwise.

    Fixes: the JSON file was opened without ever being closed (now a
    context manager), and the bare ``except:`` also swallowed
    SystemExit/KeyboardInterrupt -- it now catches only Exception.
    """
    input_filename = os.path.join(testDir, input_basename)
    with open(input_filename, encoding="utf8") as input_file:
        input_data = json.load(input_file)

    failed_testcases = []

    for testObj in input_data:
        try:
            bctest(testDir, testObj, buildenv)
            logging.info("PASSED: " + testObj["description"])
        except Exception:
            # bctest logs the specifics; here we only record the failure.
            logging.info("FAILED: " + testObj["description"])
            failed_testcases.append(testObj["description"])

    if failed_testcases:
        error_message = "FAILED_TESTCASES:\n"
        error_message += pprint.pformat(failed_testcases, width=400)
        logging.error(error_message)
        sys.exit(1)
    else:
        sys.exit(0)
def bctest(testDir, testObj, buildenv):
    """Runs a single test, comparing output and RC to expected output and RC.

    Raises an error if input can't be read, executable fails, or output/RC
    are not as expected. Error is caught by bctester() and reported.

    testObj keys used: "exec", "args", and optionally "input",
    "output_cmp", "return_code", "error_txt".
    """
    # Get the exec names and arguments
    execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
    execargs = testObj['args']
    execrun = [execprog] + execargs

    # Read the input data (if there is any)
    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = os.path.join(testDir, testObj["input"])
        inputData = open(filename, encoding="utf8").read()
        stdinCfg = subprocess.PIPE

    # Read the expected output data (if there is any)
    outputFn = None
    outputData = None
    outputType = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        # Output type from file extension (determines how to compare):
        # ".json" -> structural compare, ".hex" -> binary compare.
        outputType = os.path.splitext(outputFn)[1][1:]
        try:
            outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
        except:
            logging.error("Output file " + outputFn + " can not be opened")
            raise
        if not outputData:
            logging.error("Output data missing for " + outputFn)
            raise Exception
        if not outputType:
            logging.error("Output file %s does not have a file extension" % outputFn)
            raise Exception

    # Run the test: pipe stdin (if any) in, capture stdout/stderr as text.
    proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    try:
        outs = proc.communicate(input=inputData)
    except OSError:
        logging.error("OSError, Failed to execute " + execprog)
        raise

    if outputData:
        data_mismatch, formatting_mismatch = False, False
        # Parse command output and expected output
        try:
            a_parsed = parse_output(outs[0], outputType)
        except Exception as e:
            logging.error('Error parsing command output as %s: %s' % (outputType, e))
            raise
        try:
            b_parsed = parse_output(outputData, outputType)
        except Exception as e:
            logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
            raise
        # Compare data (semantic equality after parsing)
        if a_parsed != b_parsed:
            logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
            data_mismatch = True
        # Compare formatting (byte-exact text match, reported as a diff)
        if outs[0] != outputData:
            error_message = "Output formatting mismatch for " + outputFn + ":\n"
            error_message += "".join(difflib.context_diff(outputData.splitlines(True),
                                                          outs[0].splitlines(True),
                                                          fromfile=outputFn,
                                                          tofile="returned"))
            logging.error(error_message)
            formatting_mismatch = True
        assert not data_mismatch and not formatting_mismatch

    # Compare the return code to the expected return code
    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        logging.error("Return code mismatch for " + outputFn)
        raise Exception

    if "error_txt" in testObj:
        want_error = testObj["error_txt"]
        # Compare error text
        # TODO: ideally, we'd compare the strings exactly and also assert
        # That stderr is empty if no errors are expected. However, syscoin-tx
        # emits DISPLAY errors when running as a windows application on
        # linux through wine. Just assert that the expected error text appears
        # somewhere in stderr.
        if want_error not in outs[1]:
            logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
            raise Exception
def parse_output(a, fmt):
    """Parse the output according to specified format ('json' or 'hex').

    Raise an error if the output can't be parsed.
    """
    if fmt == 'json':
        # json: compare parsed data
        return json.loads(a)
    if fmt == 'hex':
        # hex: parse and compare binary data
        return binascii.a2b_hex(a.strip())
    raise NotImplementedError("Don't know how to compare %s" % fmt)
# Entry point: run the whole utility test suite when invoked as a script.
if __name__ == '__main__':
    main()
|
alexras/pylsdj | pylsdj/test_instrument.py | Python | mit | 1,954 | 0.002559 | import os
import json
from nose.tools import assert_equal
from .project import load_lsdsng
from .utils | import temporary_file
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
def _test_load_store_instrument(source_lsdsng, lsdinst_path, original_index):
    """Round-trip an instrument through import/export and check equality.

    Imports the .lsdinst file into slot 0x2a of the project loaded from
    source_lsdsng, checks it equals the instrument already at
    original_index, then exports it again and compares the JSON with the
    original file.
    """
    proj = load_lsdsng(source_lsdsng)

    # Import into an arbitrary free slot (0x2a) so it can be compared
    # against the pristine instrument at original_index.
    proj.song.instruments.import_from_file(0x2a, lsdinst_path)

    target_instr = proj.song.instruments[0x2a]
    original_instr = proj.song.instruments[original_index]

    assert_equal(original_instr, target_instr)

    with temporary_file() as tmpfile:
        original_instr.export_to_file(tmpfile)

        # Compare the exported JSON with the source .lsdinst, structurally.
        with open(tmpfile, 'r') as fp:
            saved_inst = json.load(fp)

        with open(lsdinst_path, 'r') as fp:
            original_inst = json.load(fp)

        assert_equal(original_inst, saved_inst)
def _fixture(filename):
    # All fixtures live under test_data/ next to this module.
    return os.path.join(SCRIPT_DIR, 'test_data', filename)


def test_load_store_wave_instrument():
    """Round-trip a wave instrument."""
    _test_load_store_instrument(
        _fixture('UNTOLDST.lsdsng'),
        _fixture('UNTOLDST_0x00_wave.lsdinst'),
        0x00)


def test_load_store_pulse_instrument():
    """Round-trip a pulse instrument."""
    _test_load_store_instrument(
        _fixture('UNTOLDST.lsdsng'),
        _fixture('UNTOLDST_0x03_pulse.lsdinst'),
        0x03)


def test_load_store_kit_instrument():
    """Round-trip a kit instrument."""
    _test_load_store_instrument(
        _fixture('UNTOLDST.lsdsng'),
        _fixture('UNTOLDST_0x16_kit.lsdinst'),
        0x16)


def test_load_store_noise_instrument():
    """Round-trip a noise instrument."""
    _test_load_store_instrument(
        _fixture('ANNARKTE.lsdsng'),
        _fixture('ANNARKTE_0x06_noise.lsdinst'),
        0x06)


def test_load_store_arduinoboy():
    """Round-trip an Arduinoboy MIDI instrument."""
    _test_load_store_instrument(
        _fixture('ARDBOYxx.lsdsng'),
        _fixture('MIDI.lsdinst'),
        0x01)
|
abn/python-bugzilla | bugzilla/rhbugzilla.py | Python | gpl-2.0 | 14,369 | 0.000974 | # rhbugzilla.py - a Python interface to Red Hat Bugzilla using xmlrpclib.
#
# Copyright (C) 2008-2012 Red Hat Inc.
# Author: Will Woods <wwoods@redhat.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. See http://www.gnu.org/copyleft/gpl.html for
# the full text of the license.
from logging import getLogger
from .base import Bugzilla
log = getLogger(__name__)
class RHBugzilla(Bugzilla):
'''
Bugzilla class for connecting Red Hat's forked bugzilla instance,
bugzilla.redhat.com
Historically this class used many more non-upstream methods, but
in 2012 RH started dropping most of its custom bits. By that time,
upstream BZ had most of the important functionality.
Much of the remaining code here is just trying to keep things operating
in python-bugzilla back compatible manner.
This class was written using bugzilla.redhat.com's API docs:
https://bugzilla.redhat.com/docs/en/html/api/
'''
    def _init_class_state(self):
        """Register RH-specific field aliases and extra getbug fields."""
        def _add_both_alias(newname, origname):
            # Alias in both directions: the friendly name maps onto the
            # cf_* API field and vice versa.
            self._add_field_alias(newname, origname, is_api=False)
            self._add_field_alias(origname, newname, is_bug=False)

        _add_both_alias('fixed_in', 'cf_fixed_in')
        _add_both_alias('qa_whiteboard', 'cf_qa_whiteboard')
        _add_both_alias('devel_whiteboard', 'cf_devel_whiteboard')
        _add_both_alias('internal_whiteboard', 'cf_internal_whiteboard')

        # Singular -> plural aliases for query convenience.
        self._add_field_alias('component', 'components', is_bug=False)
        self._add_field_alias('version', 'versions', is_bug=False)
        self._add_field_alias('sub_component', 'sub_components', is_bug=False)

        # flags format isn't exactly the same but it's the closest approx
        self._add_field_alias('flags', 'flag_types')

        # Fields RH Bugzilla only returns when explicitly requested.
        self._getbug_extra_fields = self._getbug_extra_fields + [
            "comments", "description",
            "external_bugs", "flags", "sub_components",
            "tags",
        ]
        self._supports_getbug_extra_fields = True
######################
# Bug update methods #
######################
    def build_update(self, **kwargs):
        """Build an update dict, translating RH-specific keyword args.

        Pops the RH custom fields out of kwargs, delegates the rest to
        Bugzilla.build_update, then merges the translated values back in.
        """
        # pylint: disable=arguments-differ
        adddict = {}

        def pop(key, destkey):
            # Move kwargs[key] into adddict under the API field name.
            val = kwargs.pop(key, None)
            if val is None:
                return

            adddict[destkey] = val

        def get_sub_component():
            val = kwargs.pop("sub_component", None)
            if val is None:
                return

            # A bare value is keyed by the (single) component being set.
            if not isinstance(val, dict):
                component = self._listify(kwargs.get("component"))
                if not component:
                    raise ValueError("component must be specified if "
                                     "specifying sub_component")
                val = {component[0]: val}
            adddict["sub_components"] = val

        def get_alias():
            # RHBZ has a custom extension to allow a bug to have multiple
            # aliases, so the format of aliases is
            # {"add": [...], "remove": [...]}
            # But that means in order to approximate upstream, behavior
            # which just overwrites the existing alias, we need to read
            # the bug's state first to know what string to remove. Which
            # we can't do, since we don't know the bug numbers at this point.
            # So fail for now.
            #
            # The API should provide {"set": [...]}
            # https://bugzilla.redhat.com/show_bug.cgi?id=1173114
            #
            # Implementation will go here when it's available
            pass

        pop("fixed_in", "cf_fixed_in")
        pop("qa_whiteboard", "cf_qa_whiteboard")
        pop("devel_whiteboard", "cf_devel_whiteboard")
        pop("internal_whiteboard", "cf_internal_whiteboard")

        get_sub_component()
        get_alias()

        vals = Bugzilla.build_update(self, **kwargs)
        vals.update(adddict)

        return vals
def add_external_tracker(self, bug_ids, ext_bz_bug_id, ext_type_id=None,
ext_type_description=None, ext_type_url=None,
ext_status=None, ext_description=None,
ext_priority=None):
"""
Wrapper method to allow adding of external tracking bugs using the
ExternalBugs::WebService::add_external_bug method.
This is documented at
https://bugzilla.redhat.com/docs/en/html/api/extensions/ExternalBugs/lib/WebService.html#add_external_bug
bug_ids: A single bug id or list of bug ids to have external trackers
added.
ext_bz_bug_id: The external bug id (ie: the bug number in the
external tracker).
ext_type_id: The external tracker id as used by Bugzilla.
ext_type_description: The external tracker description as used by
Bugzilla.
ext_type_url: The external tracker url as used by Bugzilla.
ext_status: The status of the external bug.
ext_description: The description of the external bug.
ext_priority: The priority of the external bug.
"""
param_dict = {'ext_bz_bug_id': ext_bz_bug_id}
if ext_type_id is not None:
param_dict['ext_type_id'] = ext_type_id
if ext_type_description is not None:
param_dict['ext_type_description'] = ext_type_description
if ext_type_url is not None:
param_dict['ext_type_url'] = ext_type_url
if ext_status is not None:
param_dict['ext_status'] = ext_status
if ext_description is not None:
param_dict['ext_description'] = ext_description
if ext_priority is not None:
param_dict['ext_priority'] = ext_priority
params = {
'bug_ids': self._listify(bug_ids),
'external_bugs': [param_dict],
}
log.debug("Calling ExternalBugs.add_external_bug(%s)", params)
return self._proxy.ExternalBugs.add_external_bug(params)
def update_external_tracker(self, ids=None, ext_type_id=None,
ext_type_description=None, ext_type_url=None,
ext_bz_bug_id=None, bug_ids=None,
ext_status=None, ext_description=None,
ext_priority=None):
"""
Wrapper method to allow adding of external tracking bugs using the
ExternalBugs::WebService::update_external_bug method.
This is documented at
https://bugzilla.redhat.com/docs/en/html/api/extensions/ExternalBugs/lib/WebService.html#update_external_bug
ids: A single external tracker bug id or list of external tracker bug
ids.
ext_type_id: The external tracker id as used by Bugzilla.
ext_type_description: The external tracker description as used by
Bugzilla.
ext_type_url: The external tracker url as used by Bugzilla.
ext_bz_bug_id: A single external bug id or list of external bug ids
(ie: the bug number in the external tracker).
bug_ids: A single bug id or list of bug ids to have external tracker
info updated.
ext_status: The status of the external bug.
ext_description: The description of the external bug.
ext_priority: The priority of the external bug.
"""
params = {}
if ids is not None:
params['ids'] = self._listify(ids)
if ext_type_id is not None:
params['ext_type_id'] = ext_type_id
if ext_type_description is not None:
params['ext_type_description'] = ext_type_description
if ext_type_url is not None:
params['ext_type_url'] = ext_type_url
if ext_bz_bug_id is not None:
params['ext_bz_bug_id'] = self._listify(ext_bz_bug_id)
if bug_ids is not None:
params['bug_ids'] = self._listify(bug_ids)
if ext_status is not None:
|
xifle/greensc | tools/scons/scons-local-2.0.1/SCons/Variables/PackageVariable.py | Python | gpl-3.0 | 3,612 | 0.003599 | """engine.SCons.Variables.PackageVariable
This file defines the option type for SCons implementing 'package
activation'.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Usage example:
Examples:
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existence)
To replace autoconf's --with-xxx=yyy
opts = Variables()
opts.Add(PackageVariable('x11',
'use X11 installed here (yes = search some places',
'yes'))
...
if env['x11'] == True:
dir = ... search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... build with x11 ...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and | this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COP | YRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PackageVariable.py 5134 2010/08/16 23:02:40 bdeegan"
__all__ = ['PackageVariable',]
import SCons.Errors
__enable_strings = ('1', 'yes', 'true', 'on', 'enable', 'search')
__disable_strings = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
    """Convert a user-supplied value to True, False or a path string."""
    lowered = val.lower()
    if lowered in __enable_strings:
        return True
    if lowered in __disable_strings:
        return False
    # Anything else is passed through unchanged (assumed to be a path).
    return val
def _validator(key, val, env, searchfunc):
    # NB: searchfunc is currently undocumented and unsupported
    """Validate a package option: resolve True via searchfunc or check path.

    Raises SCons.Errors.UserError when the option is an explicit path
    that does not exist on disk.
    """
    # todo: write validator, check for path
    import os
    if env[key] is True:
        if searchfunc:
            env[key] = searchfunc(key, val)
    elif env[key] and not os.path.exists(val):
        raise SCons.Errors.UserError(
            'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None):
    # NB: searchfunc is currently undocumented and unsupported
    """Return a (key, help, default, validator, converter) tuple.

    The input parameters describe a 'package' option: it may be enabled
    ('yes'/'true'/...), disabled ('no'/'false'/...) or set to an explicit
    installation path. The result is usable as input to opts.Add().
    """
    help = '\n    '.join(
        (help, '( yes | no | /path/to/%s )' % key))
    return (key, help, default,
            lambda k, v, e: _validator(k,v,e,searchfunc),
            _converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
dstruthers/blog.darrenstruthers.net | urls.py | Python | mit | 544 | 0.012868 | from djan | go.conf.url | s import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Discover ModelAdmin registrations from all installed apps.
admin.autodiscover()

# URL routing table: only the Django admin is currently exposed.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'blog.views.home', name='home'),
    # url(r'^blog/', include('blog.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
joealcorn/berth.cc | berth/wsgi.py | Python | mit | 385 | 0.002597 | """
WSGI config for berth project.
It exposes the WSGI callable as a module-level variable na | med ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTING | S_MODULE", "berth.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
w495/python-video-shot-detector | shot_detector/charts/event/base/base_event_chart.py | Python | bsd-3-clause | 6,299 | 0.005717 | # -*- coding: utf8 -*-
"""
...
"""
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import itertools
import logging
# PY2 & PY3 — compatibility
from builtins import map, zip
from shot_detector.handlers import BaseEventHandler, BasePlotHandler
from shot_detector.utils.multiprocessing import FuncSeqMapper
class BaseEventChart(BaseEventHandler):
    """
    Base class for event charts: plots per-event feature values for a
    set of filter descriptions onto a BasePlotHandler chart.
    """

    __logger = logging.getLogger(__name__)

    def filter_events(self, event_seq, **kwargs):
        """
        Plot the event sequence and pass it through unchanged.

        :param event_seq: iterable of events to chart
        """
        event_seq = self.plot_events(event_seq, **kwargs)
        return event_seq

    def plot_events(self, event_seq, **kwargs):
        """
        Limit the event sequence per service options and plot it.

        :param event_seq: iterable of events to chart
        """
        service_options = kwargs['service_options']
        # Restrict charting to the configured frame window.
        event_seq = self.limit_seq(
            event_seq,
            first=service_options.get('first_frame', 0),
            last=service_options.get('last_frame', 60),
            as_stream=service_options.get('as_stream', False)
        )

        plot_handler = BasePlotHandler(
            options=service_options
        )

        self.__logger.debug('plot enter {}'.format(type(self).__name__))
        event_seq = self.plot(
            event_seq,
            plot_handler,
            self.seq_filters()
        )
        self.__logger.debug('plot exit')
        return event_seq

    def seq_filters(self):
        """
        Return the filter descriptions to chart; subclasses override.

        :return: sequence of filter descriptions (empty by default)
        """
        return ()

    def plot(self, src_event_seq, chart, filter_seq):
        """
        Run every filter over the events and add the results to the chart.

        Returns an untouched tee of the input sequence so the caller can
        keep consuming the original events.

        :param src_event_seq: iterable of events
        :param chart: BasePlotHandler receiving the data points
        :param filter_seq: filter descriptions (need .name, .offset,
            .plot_options and .formula)
        """
        # Tee so plotting consumes one copy and the caller gets the other.
        src_event_seq, dst_event_seq = itertools.tee(src_event_seq)

        processed_seq = self.processed_seq(src_event_seq, filter_seq)

        filter_event = zip(filter_seq, processed_seq)
        for filter_desc, event_seq in filter_event:
            for event in event_seq:
                # self.__logger.info(
                #     "\n<<%s>> - %s - [%s] -<%s>",
                #     filter_desc.name,
                #     event,
                #     event.time,
                #     event.feature
                # )
                filtered = event.feature
                time = 0
                if event.time:
                    time = float(event.time)
                # x = event time shifted by the filter's offset; y = feature.
                chart.add_data(
                    name=filter_desc.name,
                    key=(1.0 * (time - filter_desc.offset)),
                    value=(1.0 * filtered),
                    plot_options=filter_desc.plot_options
                )

        self.__logger.debug('chart.plot_data() enter')
        chart.plot_data(show=False)
        self.__logger.debug('chart.plot_data() exit')

        return dst_event_seq

    def processed_seq_legacy(self, src_event_seq, filter_seq):
        """
        Lazily apply each filter's formula to its own tee of the events.

        :param src_event_seq: iterable of events
        :param filter_seq: filter descriptions
        :return: iterable of filtered event sequences, one per filter
        """

        def to_list(seq):
            """
            Identity pass-through (kept as a hook for materialization).

            :param seq: any sequence
            :return: the sequence unchanged
            """
            return seq

        def apply_filter(arg):
            """
            Apply one (filter_desc, event_seq) pair's formula.

            :param arg: (filter_desc, event_seq) tuple
            :return: filtered event sequence
            """
            (filter_desc, event_seq) = arg
            filter_objects = filter_desc.formula.filter_objects
            event_seq = filter_objects(event_seq)
            return to_list(event_seq)

        filter_event = self.filter_event(
            src_event_seq,
            filter_seq
        )
        filter_event_seq = (
            (fd, to_list(es)) for fd, es in filter_event
        )
        processed_seq = map(apply_filter, filter_event_seq)
        return processed_seq

    def processed_seq_simple(self, src_event_seq, filter_seq):
        """
        Generator: yield each filter's output over its own event tee.

        :param src_event_seq: iterable of events
        :param filter_seq: filter descriptions
        :return: generator of filtered event sequences
        """
        event_seq_tuple = self.event_seq_tuple(
            src_event_seq,
            filter_seq
        )
        filter_event = zip(filter_seq, event_seq_tuple)
        for filter_desc, event_seq in filter_event:
            new_event_seq = self.apply_filter(filter_desc, event_seq)
            yield new_event_seq

    def filter_event(self, src_event_seq, filter_seq):
        """
        Pair every filter description with its own tee of the events.

        :param src_event_seq: iterable of events
        :param filter_seq: filter descriptions
        :return: zip of (filter_desc, event_seq) pairs
        """
        event_seq_tuple = self.event_seq_tuple(
            src_event_seq,
            filter_seq
        )
        filter_event = zip(filter_seq, event_seq_tuple)
        return filter_event

    # noinspection PyMethodMayBeStatic
    def event_seq_tuple(self, src_event_seq, filter_seq):
        """
        Tee the event sequence once per filter.

        :param src_event_seq: iterable of events
        :param filter_seq: filter descriptions (only its length is used)
        :return: tuple of independent event iterators
        """
        filter_count = len(filter_seq)
        event_seq_tuple = itertools.tee(src_event_seq, filter_count)
        return event_seq_tuple

    def apply_filter(self, filter_desc, event_seq):
        """
        Run one filter's formula over an event sequence.

        :param filter_desc: filter description carrying .formula
        :param event_seq: iterable of events
        :return: filtered events
        """
        filter_objects = filter_desc.formula.filter_objects
        events = self.event_seq_to_list(event_seq)
        new_event_seq = filter_objects(events)
        new_events = self.event_seq_to_list(new_event_seq)
        return new_events

    @staticmethod
    def event_seq_to_list(seq):
        """
        Identity pass-through (hook for eager materialization).

        :param seq: any sequence
        :return: the sequence unchanged
        """
        return seq

    def processed_seq_future(self, src_event_seq, filter_seq):
        """
        Experimental: apply all filters via a multiprocessing mapper.

        :param src_event_seq: iterable of events (materialized to a list)
        :param filter_seq: filter descriptions
        :return: sequence of filtered event lists
        """
        func_seq = list(
            filter_desc.formula.filter_objects_as_list
            for filter_desc in filter_seq
        )
        func_seq_mapper = FuncSeqMapper(
            caller=self
        )
        processed_seq = func_seq_mapper.map(
            func_seq,
            list(src_event_seq),
        )
        return processed_seq

    def processed_seq(self, src_event_seq, filter_seq):
        """
        Dispatch to the current processing strategy (the simple one).

        :param src_event_seq: iterable of events
        :param filter_seq: filter descriptions
        :return: iterable of filtered event sequences
        """
        return self.processed_seq_simple(src_event_seq, filter_seq)
|
palmtree5/Red-DiscordBot | redbot/cogs/audio/core/cog_utils.py | Python | gpl-3.0 | 4,323 | 0.000925 | from abc import ABC
from typing import Final
from base64 import b64decode
from io import BytesIO
import struct
from redbot import VersionInfo
from redbot.core import commands
from ..converters import get_lazy_converter, get_playlist_converter
__version__ = VersionInfo.from_json({"major": 2, "minor": 4, "micro": 0, "releaselevel": "final"})
__author__ = ["aikaterna", "Draper"]
_SCHEMA_VERSION: Final[int] = 3
_OWNER_NOTIFICATION: Final[int] = 1
LazyGreedyConverter = get_lazy_converter("--")
PlaylistConverter = get_playlist_converter()
HUMANIZED_PERM = {
"create_instant_invite": "Create Instant Invite",
"kick_members": "Kick Members",
"ban_members": "Ban Members",
"administrator": "Administrator",
"manage_channels": "Manage Channels",
"manage_guild": "Manage Server",
"add_reactions": "Add Reactions",
"view_audit_log": "View Audit Log",
"priority_speaker": "Priority Speaker",
"stream": "Go Live",
"read_messages": "Read Text Channels & See Voice Channels",
"send_messages": "Send Messages",
"send_tts_messages": "Send TTS Messages",
"manage_messages": "Manage Messages",
"embed_links": "Embed Links",
"attach_files": "Attach Files",
"read_message_history": "Read Message History",
"mention_everyone": "Mention @everyone, @here, and All Roles",
"external_emojis": "Use External Emojis",
"view_guild_insights": "View Server Insights",
"connect": "Connect",
"speak": "Speak",
"mute_members": "Mute Members",
"deafen_members": "Deafen Members",
"move_members": "Move Members",
"use_voice_activation": "Use Voice Activity",
"change_nickname": "Change Nickname",
"manage_nicknames": "Manage Nicknames",
"manage_roles": "Manage Roles",
"manage_webhooks": "Manage Webhooks",
"manage_emojis": "Manage Emojis",
}
class CompositeMetaClass(type(commands.Cog), type(ABC)):
    """
    This allows the metaclass used for proper type detection to
    coexist with discord.py's metaclass
    """

    # Subclassing both metaclasses resolves the metaclass conflict that would
    # otherwise arise when a cog inherits from both commands.Cog and ABC.
    pass
# Both DataReader and DataWriter are taken from https://github.com/Devoxin/Lavalink.py/blob/master/lavalink/datarw.py
# These are licensed under MIT, thanks Devoxin for putting these together!
# The license can be found in https://github.com/Devoxin/Lavalink.py/blob/master/LICENSE
class DataReader:
    """Sequentially decodes big-endian fields from a base64 Lavalink track blob."""

    def __init__(self, ts):
        # `ts` is the base64-encoded track string; stray "|" residue removed
        # from the original source line.
        self._buf = BytesIO(b64decode(ts))

    def _read(self, n):
        # Read exactly up to `n` bytes from the current cursor position.
        return self._buf.read(n)

    def read_byte(self):
        return self._read(1)

    def read_boolean(self):
        (result,) = struct.unpack("B", self.read_byte())
        return result != 0

    def read_unsigned_short(self):
        (result,) = struct.unpack(">H", self._read(2))
        return result

    def read_int(self):
        (result,) = struct.unpack(">i", self._read(4))
        return result

    def read_long(self):
        # NOTE(review): ">Q" is *unsigned* despite the "long" name — matches
        # the upstream Lavalink.py implementation.
        (result,) = struct.unpack(">Q", self._read(8))
        return result

    def read_utf(self):
        # Length-prefixed (unsigned short) byte string; returned undecoded.
        text_length = self.read_unsigned_short()
        return self._read(text_length)
class DataWriter:
    """Accumulates big-endian fields and emits a Lavalink track byte blob."""

    def __init__(self):
        self._buf = BytesIO()

    def _write(self, data):
        self._buf.write(data)

    def write_byte(self, byte):
        # Route through _write for consistency with the other writers
        # (previously wrote to self._buf directly).
        self._write(byte)

    def write_boolean(self, b):
        enc = struct.pack("B", 1 if b else 0)
        self.write_byte(enc)

    def write_unsigned_short(self, s):
        enc = struct.pack(">H", s)
        self._write(enc)

    def write_int(self, i):
        enc = struct.pack(">i", i)
        self._write(enc)

    def write_long(self, l):
        # ">Q" is unsigned; mirrors DataReader.read_long.
        enc = struct.pack(">Q", l)
        self._write(enc)

    def write_utf(self, s):
        # Length-prefixed (unsigned short) UTF-8 string, max 65535 bytes.
        utf = s.encode("utf8")
        byte_len = len(utf)
        if byte_len > 65535:
            raise OverflowError("UTF string may not exceed 65535 bytes!")
        self.write_unsigned_short(byte_len)
        self._write(utf)

    def finish(self):
        """Return the payload prefixed by its length OR'd with the version flag."""
        with BytesIO() as track_buf:
            byte_len = self._buf.getbuffer().nbytes
            flags = byte_len | (1 << 30)
            enc_flags = struct.pack(">i", flags)
            track_buf.write(enc_flags)
            self._buf.seek(0)
            track_buf.write(self._buf.read())
            self._buf.close()
            track_buf.seek(0)
            return track_buf.read()
|
EnEff-BIM/EnEffBIM-Framework | SimModel_Python_API/simmodel_swig/Release/SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.py | Python | mit | 9,034 | 0.00642 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimAppObjNameDefault_DistributionSystem_HvacSteamLoop', [dirname(__file__)])
except ImportError:
import _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop
if fp is not None:
try:
_mod = imp.load_module('_SimAppObjNameDefault_DistributionSystem_HvacSteamLoop', fp, pathname, description)
finally:
fp.close()
return _mod
_SimAppObjNameDefault_DistributionSystem_HvacSteamLoop = swig_import_helper()
del swig_import_helper
else:
import _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
# SWIG-generated attribute setter: routes writes through the proxy class's
# __swig_setmethods__ table; with static=1, names without a setter raise.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
# Convenience wrapper: non-static (dynamic) attribute set.
def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
# SWIG-generated attribute getter: consults the proxy class's
# __swig_getmethods__ table; with static=1, unknown names raise.
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
# Convenience wrapper: non-static (dynamic) attribute get.
def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)
# repr() for SWIG proxies; degrades gracefully when the C pointer is absent.
def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
import SimAppObjNameDefault_DistributionSystem_HvacAirLoop
import SimAppObjNameDefault_BldgComponentGroup_HvacComponent
# SWIG-generated proxy class for the C++ HvacSteamLoop distribution-system
# type. Auto-generated boilerplate — do not edit by hand; regenerate via SWIG.
class SimAppObjNameDefault_DistributionSystem_HvacSteamLoop(SimAppObjNameDefault_DistributionSystem_HvacAirLoop.SimAppObjNameDefault_DistributionSystem):
    # Inherit the parent proxy's setter/getter dispatch tables.
    __swig_setmethods__ = {}
    for _s in [SimAppObjNameDefault_DistributionSystem_HvacAirLoop.SimAppObjNameDefault_DistributionSystem]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimAppObjNameDefault_DistributionSystem_HvacSteamLoop, name, value)
    __swig_getmethods__ = {}
    for _s in [SimAppObjNameDefault_DistributionSystem_HvacAirLoop.SimAppObjNameDefault_DistributionSystem]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimAppObjNameDefault_DistributionSystem_HvacSteamLoop, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        # Construct the underlying C++ object and attach its pointer.
        this = _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.new_SimAppObjNameDefault_DistributionSystem_HvacSteamLoop(*args)
        try:
            self.this.append(this)
        except:
            self.this = this

    def _clone(self, f=0, c=None):
        return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop__clone(self, f, c)
    __swig_destroy__ = _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.delete_SimAppObjNameDefault_DistributionSystem_HvacSteamLoop
    __del__ = lambda self: None
SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_swigregister = _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_swigregister
SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_swigregister(SimAppObjNameDefault_DistributionSystem_HvacSteamLoop)
class SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = | lambda self, name: _swig_getattr(self, SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.new_SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return | _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_assign(self, n, x)
def begin(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_begin(self, *args)
def end(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_end(self, *args)
def rbegin(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_rend(self, *args)
def at(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_at(self, *args)
def front(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_front(self, *args)
def back(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_back(self, *args)
def push_back(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_push_back(self, *args)
def pop_back(self):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_insert(self, *args)
def erase(self, *args):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNameDefault_DistributionSystem_HvacSteamLoop_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimAppObjNameDefault_DistributionSystem_HvacSteamLoop.SimAppObjNam |
nathanhi/deepserve | deepserve/urls.py | Python | mit | 328 | 0 | # -*- coding: utf-8 -*-
from django.conf.urls import include, url
from django.contrib import admin

from deepserve.blag.views import index

# URL routes for the deepserve project; stray "|" residue removed from the
# blag include line.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url('^markdown/', include('django_markdown.urls')),
    url(r'^$', index),
    url(r'^blag/', include('deepserve.blag.urls')),
]
|
andreiavram/organizer | organizer/urls.py | Python | mit | 637 | 0.00157 | f | rom django.urls import path, include
from django.contrib import admin
from rest_framework.routers import DefaultRouter
from tasks.views import TaskItemViewSet, MainAppView, TagViewSet, ProjectViewSet, TaskCommentViewSet
admin.autodiscover()
router = DefaultRouter()
router.register(r'task', TaskItemViewSet)
router.register(r't | ag', TagViewSet)
router.register(r'project', ProjectViewSet)
router.register(r'comments', TaskCommentViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('', MainAppView.as_view(), {}, "index"),
path('api/', include(router.urls)),
path('rest-auth/', include('rest_auth.urls'))
]
|
akrherz/iem | htdocs/plotting/auto/scripts100/p182.py | Python | mit | 6,994 | 0 | """Precip estimates"""
import datetime
import os
import numpy as np
import geopandas as gpd
from pyiem import iemre, util
from pyiem.plot import figure_axes
from pyiem.reference import state_names
from pyiem.grid.zs import CachingZonalStats
from pyiem.exceptions import NoDataFound
def get_description():
    """Return a dict describing how to call this plotter"""
    yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
    description = """This application attempts to assess the
    effectiveness of a calendar day's rainfall based on where the rain fell
    in relation to a previous period of days departure from climatology. So
    for a given date and state, the areal coverage of daily precipitation
    at some given threshold is compared against the departure from climatology
    over some given number of days. The intention is to answer a question like
    how much of the rain on a given day fell on an area that needed it! The
    areal coverage percentages are relative to the given state.
    """
    return {
        "data": False,
        "description": description,
        "arguments": [
            dict(
                type="csector", name="sector", default="IA", label="Select Sector:"
            ),
            dict(
                type="date",
                name="date",
                default=yesterday.strftime("%Y/%m/%d"),
                label="Date:",
                min="2011/01/01",
            ),
            dict(
                type="int",
                name="trailing",
                default=31,
                label="Over how many trailing days to compute departures?",
            ),
            dict(
                type="float",
                name="threshold",
                default=0.1,
                label="Date Precipitation Threshold (inch)",
            ),
        ],
    }
def plotter(fdict):
    """Generate the areal-coverage comparison plot.

    Bins each MRMS grid cell by its trailing-period precipitation departure
    from climatology, then compares the state's areal coverage of each bin
    against the coverage of cells that also saw >= threshold precip on the
    requested date. Stray "|" dataset residue removed from two lines of the
    original source (the islice/flipud lines).
    """
    ctx = util.get_autoplot_context(fdict, get_description())
    date = ctx["date"]
    sector = ctx["sector"]
    days = ctx["trailing"]
    threshold = ctx["threshold"]
    window_sts = date - datetime.timedelta(days=days)
    if window_sts.year != date.year:
        raise NoDataFound("Sorry, do not support multi-year plots yet!")
    if len(sector) != 2:
        raise NoDataFound(
            "Sorry, this does not support multi-state plots yet."
        )

    idx0 = iemre.daily_offset(window_sts)
    idx1 = iemre.daily_offset(date)
    ncfn = iemre.get_daily_mrms_ncname(date.year)
    ncvar = "p01d"
    if not os.path.isfile(ncfn):
        raise NoDataFound("No data for that year, sorry.")
    # Get the state weight
    with util.get_sqlalchemy_conn("postgis") as conn:
        df = gpd.GeoDataFrame.from_postgis(
            "SELECT the_geom from states where state_abbr = %s",
            conn,
            params=(sector,),
            index_col=None,
            geom_col="the_geom",
        )
    czs = CachingZonalStats(iemre.MRMS_AFFINE)
    with util.ncopen(ncfn) as nc:
        czs.gen_stats(
            np.zeros((nc.variables["lat"].size, nc.variables["lon"].size)),
            df["the_geom"],
        )
        hasdata = None
        jslice = None
        islice = None
        for nav in czs.gridnav:
            # Build a 1/0 in-state mask for the state's bounding grid window.
            hasdata = np.ones((nav.ysz, nav.xsz))
            hasdata[nav.mask] = 0.0
            # careful here as y is flipped in this context
            jslice = slice(
                nc.variables["lat"].size - (nav.y0 + nav.ysz),
                nc.variables["lat"].size - nav.y0,
            )
            islice = slice(nav.x0, nav.x0 + nav.xsz)
        hasdata = np.flipud(hasdata)
        today = util.mm2inch(nc.variables[ncvar][idx1, jslice, islice])
        if (idx1 - idx0) < 32:
            p01d = util.mm2inch(
                np.sum(nc.variables[ncvar][idx0:idx1, jslice, islice], 0)
            )
        else:
            # Too much data can overwhelm this app, need to chunk it
            for i in range(idx0, idx1, 10):
                i2 = min([i + 10, idx1])
                if idx0 == i:
                    p01d = util.mm2inch(
                        np.sum(nc.variables[ncvar][i:i2, jslice, islice], 0)
                    )
                else:
                    p01d += util.mm2inch(
                        np.sum(nc.variables[ncvar][i:i2, jslice, islice], 0)
                    )
    # Get climatology
    with util.ncopen(iemre.get_dailyc_mrms_ncname()) as nc:
        if (idx1 - idx0) < 32:
            c_p01d = util.mm2inch(
                np.sum(nc.variables[ncvar][idx0:idx1, jslice, islice], 0)
            )
        else:
            # Too much data can overwhelm this app, need to chunk it
            for i in range(idx0, idx1, 10):
                i2 = min([i + 10, idx1])
                if idx0 == i:
                    c_p01d = util.mm2inch(
                        np.sum(nc.variables[ncvar][i:i2, jslice, islice], 0)
                    )
                else:
                    c_p01d += util.mm2inch(
                        np.sum(nc.variables[ncvar][i:i2, jslice, islice], 0)
                    )

    # we actually don't care about weights at this fine of scale
    cells = np.sum(np.where(hasdata > 0, 1, 0))
    departure = p01d - c_p01d
    # Update departure and today to values unconsidered below when out of state
    departure = np.where(hasdata > 0, departure, -9999)
    today = np.where(hasdata > 0, today, 0)
    ranges = [
        [-99, -3],
        [-3, -2],
        [-2, -1],
        [-1, 0],
        [0, 1],
        [1, 2],
        [2, 3],
        [3, 99],
    ]
    x = []
    x2 = []
    labels = []
    for (minv, maxv) in ranges:
        labels.append(f"{minv:.0f} to {maxv:.0f}")
        # How many departure cells in this range
        hits = np.logical_and(departure < maxv, departure > minv)
        hits2 = np.logical_and(hits, today > threshold)
        x.append(np.sum(np.where(hits, 1, 0)) / float(cells) * 100.0)
        x2.append(np.sum(np.where(hits2, 1, 0)) / float(cells) * 100.0)

    title = (
        f"{state_names[sector]} NOAA MRMS {date:%-d %b %Y} "
        f"{threshold:.2f} inch Precip Coverage"
    )
    (fig, ax) = figure_axes(apctx=ctx, title=title)
    ax.bar(
        np.arange(8) - 0.2,
        x,
        align="center",
        width=0.4,
        label=f"Trailing {days} Day Departure",
    )
    ax.bar(
        np.arange(8) + 0.2,
        x2,
        align="center",
        width=0.4,
        label=f"{date:%-d %b %Y} Coverage ({sum(x2):.1f}% Tot)",
    )
    for i, (_x1, _x2) in enumerate(zip(x, x2)):
        ax.text(i - 0.2, _x1 + 1, f"{_x1:.1f}", ha="center")
        ax.text(i + 0.2, _x2 + 1, f"{_x2:.1f}", ha="center")
    ax.set_xticks(np.arange(8))
    ax.set_xticklabels(labels)
    ax.set_xlabel(f"Trailing {days} Day Precip Departure [in]")
    ax.set_position([0.1, 0.2, 0.8, 0.7])
    ax.legend(loc=(0.0, -0.2), ncol=2)
    ax.set_ylabel(f"Areal Coverage of {state_names[sector]} [%]")
    ax.grid(True)
    ax.set_xlim(-0.5, 7.5)
    ax.set_ylim(0, max([max(x2), max(x)]) + 5)
    return fig
if __name__ == "__main__":
plotter({})
|
rohitranjan1991/home-assistant | homeassistant/components/zerproc/__init__.py | Python | mit | 1,353 | 0.001478 | """Zerproc lights integration."""
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .const import DATA_ADDRESSES, DATA_DISCOVERY_SUBSCRIPTION, DOMAIN
PLATFORMS = [Platform.LIGHT]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the Zerproc platform."""
    # Kick off an import-source config flow so a config entry is created
    # even without YAML configuration.
    hass.async_create_task(
        hass.config_entries.flow.async_init(DOMAIN, context={"source": SOURCE_IMPORT})
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Zerproc from a config entry."""
    # Initialize the integration's shared storage on first setup.
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    if DATA_ADDRESSES not in hass.data[DOMAIN]:
        hass.data[DOMAIN][DATA_ADDRESSES] = set()

    # Stray "|" residue removed from this call in the original source.
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Stop discovery; stray "|" residue removed from the if-condition line.
    unregister_discovery = hass.data[DOMAIN].pop(DATA_DISCOVERY_SUBSCRIPTION, None)
    if unregister_discovery:
        unregister_discovery()

    hass.data.pop(DOMAIN, None)

    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
iot-salzburg/self-driving-slotcar | gym-slotcar/setup.py | Python | apache-2.0 | 185 | 0.010811 | from setuptools import setup
setup(name='gym_slotcar',
      version='0.0.1',
      # Stray "|" residue removed from the original install_requires line.
      install_requires=['gym']  # And any other dependencies foo needs [add stuff like numpy etc.]
      )
aurelieladier/openturns | python/test/t_ExponentialFactory_std.py | Python | lgpl-3.0 | 1,511 | 0.000662 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *

TESTPREAMBLE()
RandomGenerator.SetSeed(0)

try:
    # Reference distribution the factory should recover from sampled data.
    distribution = Exponential(2.5, -1.3)
    size = 10000
    sample = distribution.getSample(size)
    factory = ExponentialFactory()
    estimatedDistribution = factory.build(sample)
    print("distribution=", repr(distribution))
    print("Estimated distribution=", repr(estimatedDistribution))
    estimatedDistribution = factory.build()
    print("Default distribution=", estimatedDistribution)
    estimatedDistribution = factory.build(
        distribution.getParameter())
    print("Distribution from parameters=", estimatedDistribution)
    estimatedExponential = factory.buildAsExponential(sample)
    print("Exponential =", distribution)
    print("Estimated exponential=", estimatedExponential)
    estimatedExponential = factory.buildAsExponential()
    print("Default exponential=", estimatedExponential)
    estimatedExponential = factory.buildAsExponential(
        distribution.getParameter())
    print("Exponential from parameters=", estimatedExponential)
    # Degenerate (constant) samples; stray "|" residue removed from the
    # original print/except lines.
    sample = [[0.0]] * size
    estimatedDistribution = factory.build(sample)
    print("Estimated distribution=", repr(estimatedDistribution))
    sample = [[1.0]] * size
    estimatedDistribution = factory.build(sample)
    print("Estimated distribution=", repr(estimatedDistribution))
except:
    import sys
    print("t_ExponentialFactory_std.py", sys.exc_info()[0], sys.exc_info()[1])
|
KnightOS/packages.knightos.org | packages/config.py | Python | mit | 619 | 0.004847 | import logging
try:
    from configparser import ConfigParser
except ImportError:
    # Python 2 support: the module was renamed in Python 3. Stray "|"
    # residue removed from this except line in the original source.
    from ConfigParser import ConfigParser

# NOTE(review): "packges" typo preserved — it is a runtime logger name that
# other modules may reference; renaming it would change behavior.
logger = logging.getLogger("packges.knightos.org")
logger.setLevel(logging.DEBUG)

sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)

formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
sh.setFormatter(formatter)
logger.addHandler(sh)

# scss logger
logging.getLogger("scss").addHandler(sh)

config = ConfigParser()
# Close the file handle after parsing (the original leaked it via
# readfp(open(...))); readfp is kept for Python 2 compatibility.
with open('config.ini') as _config_file:
    config.readfp(_config_file)

env = 'dev'
_cfg = lambda k: config.get(env, k)
_cfgi = lambda k: int(_cfg(k))
|
h2oai/sparkling-water | py/src/ai/h2o/sparkling/ml/algos/H2OAutoMLExtras.py | Python | apache-2.0 | 1,464 | 0.001366 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the speci | fic language governing permissions and
# limitations under the License.
#
from pyspark.sql import SparkSession
from pyspark.sql.dataframe import DataFrame
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OMOJOModelFactory
class H2OAutoMLExtras:
    """Mixin with convenience accessors for a fitted H2O AutoML wrapper."""

    def getLeaderboard(self, *extraColumns):
        # Accept both getLeaderboard("a", "b") and getLeaderboard(["a", "b"]).
        if len(extraColumns) == 1 and isinstance(extraColumns[0], list):
            extraColumns = extraColumns[0]
        leaderboard_java = self._java_obj.getLeaderboard(extraColumns)
        # Wrap the JVM-side DataFrame handle in a PySpark DataFrame.
        return DataFrame(leaderboard_java, SparkSession.builder.getOrCreate()._wrapped)

    def getAllModels(self):
        # Wrap each Java MOJO model handle in its Python counterpart.
        javaModels = self._java_obj.getAllModels()
        return [H2OMOJOModelFactory.createSpecificMOJOModel(javaModel) for javaModel in javaModels]
|
valerymelou/cookiecutter-django-gulp | setup.py | Python | bsd-3-clause | 1,852 | 0.00108 | #!/usr/bin/env python
import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Our version ALWAYS matches the version of Django we support
# If Django has a new release, we branch, tag, then update this setting after the tag.
version = '1.10.1'

if sys.argv[-1] == 'tag':
    os.system('git tag -a %s -m "version %s"' % (version, version))
    os.system('git push --tags')
    sys.exit()

with open('README.rst') as readme_file:
    long_description = readme_file.read()

# Stray "|" residue removed from the packages= and License classifier lines.
setup(
    name='cookiecutter-django-gulp',
    version=version,
    description='A Cookiecutter template for integrating Gulp in Django projects.',
    long_description=long_description,
    author='Valery Melou',
    author_email='valerymelou@gmail.com',
    url='https://github.com/valerymelou/cookiecutter-django-gulp',
    packages=[],
    license='BSD',
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Framework :: Django :: 1.10',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development',
    ],
    keywords=(
        'cookiecutter, Python, projects, project templates, django, '
        'gulp, compass, sass, minification, skeleton, scaffolding, '
        'project directory, setup.py'
    ),
)
|
arubertoson/piemenu | piemenu/aforms/core/main.py | Python | gpl-2.0 | 1,524 | 0.001312 |
""" Module main
Contains the main frame and implements the main window.
"""
import os
import platform
from PySide import QtCore, QtGui
import aforms
from aforms.codeeditor import CodeEditor
from aforms.codeeditor import python_highlighting
# from app import style_rc
class MainWindow(QtGui.QMainWindow):
    """Top-level application window: titled, sized, and showing a code editor."""

    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        self.setMainTitle()
        # self.setWindowIcon()
        self.resize(800, 600)
        # self.restoreGeometry()
        window_icon = aforms.icon['window']
        self.setWindowIcon(window_icon)
        self.show()
        self._populate()

    def _populate(self):
        # Install the code editor as the central widget. Stray "|" dataset
        # residue and an unused local import (TextField) removed; dead
        # experimentation comments trimmed.
        widget = CodeEditor()
        self.setCentralWidget(widget)

    def setMainTitle(self):
        title = 'Command System'
        self.setWindowTitle(title)

    def closeEvent(self, event):
        # implement save config
        QtGui.QMainWindow.closeEvent(self, event)
|
germn/python-for-android | setup.py | Python | mit | 4,634 | 0.001079 |
import glob
from io import open # for open(..,encoding=...) parameter in python 2
from os import walk
from os.path import join, dirname, sep
import re
from setuptools import setup, find_packages
# NOTE: All package data should also be set in MANIFEST.in
packages = find_packages()
package_data = {'': ['*.tmpl',
'*.patch', ], }
data_files = []
# must be a single statement since buildozer is currently parsing it, refs:
# https://github.com/kivy/buildozer/issues/722
install_reqs = [
    'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',
    'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"',
    # Stray '"' removed from the pep517 pin: 'pep517<0.7.0"' is not a
    # valid requirement specifier.
    'pep517<0.7.0', 'toml',
]
# (pep517 and toml are used by pythonpackage.py)
# By specifying every file manually, package_data will be able to
# include them in binary distributions. Note that we have to add
# everything as a 'pythonforandroid' rule, using '' apparently doesn't
# work.
def recursively_include(results, directory, patterns):
    # Walk `directory` and record every file matching one of `patterns`
    # under results['pythonforandroid'], with the first path component
    # stripped (package_data paths are relative to the package root).
    for root, subfolders, files in walk(directory):
        for fn in files:
            if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns):
                continue
            filename = join(root, fn)
            # NOTE(review): this rebinding shadows the `directory` parameter;
            # all matches are filed under the fixed 'pythonforandroid' key.
            directory = 'pythonforandroid'
            if directory not in results:
                results[directory] = []
            results[directory].append(join(*filename.split(sep)[1:]))
recursively_include(package_data, 'pythonforandroid/recipes',
['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
'*.mk', '*.jam', ])
recursively_include(package_data, 'pythonforandroid/bootstraps',
['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
'*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',
'*.gradle', '.gitkeep', 'gradlew*', '*.jar', "*.patch", ])
recursively_include(package_data, 'pythonforandroid/bootstraps',
['sdl-config', ])
recursively_include(package_data, 'pythonforandroid/bootstraps/webview',
['*.html', ])
recursively_include(package_data, 'pythonforandroid',
['liblink', 'biglink', 'liblink.sh'])
with open(join(dirname(__file__), 'README.md'),
encoding="utf-8",
errors="replace",
) as fileh:
long_description = fileh.read()
init_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')
version = None
try:
with open(init_filen,
encoding="utf-8",
errors="replace"
) as fileh:
lines = fileh.readlines()
except IOError:
pass
else:
for line in lines:
line = line.strip()
if line.startswith('__version__ = '):
matches = re.findall(r'["\'].+["\']', line)
if matches:
version = matches[0].strip("'").strip('"')
break
if version is None:
raise Exception('Error: version could not be loaded from {}'.format(init_filen))
# Stray "|" residue removed from two 'Programming Language' classifier strings.
setup(name='python-for-android',
      version=version,
      description='Android APK packager for Python scripts and apps',
      long_description=long_description,
      long_description_content_type='text/markdown',
      python_requires=">=3.6.0",
      author='The Kivy team',
      author_email='kivy-dev@googlegroups.com',
      url='https://github.com/kivy/python-for-android',
      license='MIT',
      install_requires=install_reqs,
      entry_points={
          'console_scripts': [
              'python-for-android = pythonforandroid.entrypoints:main',
              'p4a = pythonforandroid.entrypoints:main',
          ],
          'distutils.commands': [
              'apk = pythonforandroid.bdistapk:BdistAPK',
              'aar = pythonforandroid.bdistapk:BdistAAR',
          ],
      },
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: OS Independent',
          'Operating System :: POSIX :: Linux',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Android',
          'Programming Language :: C',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Topic :: Software Development',
          'Topic :: Utilities',
      ],
      packages=packages,
      package_data=package_data,
      )
|
allenai/allennlp | allennlp/modules/stacked_bidirectional_lstm.py | Python | apache-2.0 | 6,459 | 0.002632 | from typing import Optional, Tuple, List
import torch
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.common.checks import ConfigurationError
TensorPair = Tuple[torch.Tensor, torch.Tensor]
class StackedBidirectionalLstm(torch.nn.Module):
"""
A standard stacked Bidirectional LSTM where the LSTM layers
are concatenated between each layer. The only difference between
this and a regular bidirectional LSTM is the application of
variational dropout to the hidden states and outputs of each layer apart
from the last layer of the LSTM. Note that this will be slower, as it
doesn't use CUDNN.
[0]: https://arxiv.org/abs/1512.05287
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
num_layers : `int`, required
The number of stacked Bidirectional LSTMs to use.
recurrent_dropout_probability : `float`, optional (default = `0.0`)
The recurrent dropo | ut probability to be used in a dropout scheme as
stated in [A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks][0].
layer_dropout_probab | ility : `float`, optional (default = `0.0`)
The layer wise dropout probability to be used in a dropout scheme as
stated in [A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks][0].
use_highway : `bool`, optional (default = `True`)
Whether or not to use highway connections between layers. This effectively involves
reparameterising the normal output of an LSTM as::
gate = sigmoid(W_x1 * x_t + W_h * h_t)
output = gate * h_t + (1 - gate) * (W_x2 * x_t)
"""
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
use_highway: bool = True,
) -> None:
super().__init__()
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bidirectional = True
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
forward_layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward=True,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=False,
)
backward_layer = AugmentedLstm(
lstm_input_size,
hidden_size,
go_forward=False,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=False,
)
lstm_input_size = hidden_size * 2
self.add_module("forward_layer_{}".format(layer_index), forward_layer)
self.add_module("backward_layer_{}".format(layer_index), backward_layer)
layers.append([forward_layer, backward_layer])
self.lstm_layers = layers
self.layer_dropout = InputVariationalDropout(layer_dropout_probability)
def forward(
self, inputs: PackedSequence, initial_state: Optional[TensorPair] = None
) -> Tuple[PackedSequence, TensorPair]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = `None`)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (num_layers, batch_size, output_dimension * 2).
# Returns
output_sequence : `PackedSequence`
The encoded sequence of shape (batch_size, sequence_length, hidden_size * 2)
final_states: `torch.Tensor`
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers * 2, batch_size, hidden_size * 2).
"""
if initial_state is None:
hidden_states: List[Optional[TensorPair]] = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
output_sequence = inputs
final_h = []
final_c = []
for i, state in enumerate(hidden_states):
forward_layer = getattr(self, "forward_layer_{}".format(i))
backward_layer = getattr(self, "backward_layer_{}".format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
forward_output, final_forward_state = forward_layer(output_sequence, state)
backward_output, final_backward_state = backward_layer(output_sequence, state)
forward_output, lengths = pad_packed_sequence(forward_output, batch_first=True)
backward_output, _ = pad_packed_sequence(backward_output, batch_first=True)
output_sequence = torch.cat([forward_output, backward_output], -1)
# Apply layer wise dropout on each output sequence apart from the
# first (input) and last
if i < (self.num_layers - 1):
output_sequence = self.layer_dropout(output_sequence)
output_sequence = pack_padded_sequence(output_sequence, lengths, batch_first=True)
final_h.extend([final_forward_state[0], final_backward_state[0]])
final_c.extend([final_forward_state[1], final_backward_state[1]])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
return output_sequence, final_state_tuple
|
jcornford/pyecog | pyecog/ndf/__init__.py | Python | mit | 880 | 0.009091 | from .ndfconverter import NdfFile
from .h5loader import H5File
from .datahandler import DataHandler
from .classifier import FeaturePreProcesser
from .classifier import Classifier
from .classifier import load_classifier
#from .bokeh_visualisation import plot
#from .bokeh_visualisation import basic_plot
from .feature_extractor import FeatureExtractor
from . import classifier_utils
import os
import logging
#try:
# logging.info('Re-intted')
#except:
logger = logging.getLogger()
logpath = os.getcwd()
#fhandler = logging.FileHandler(filename=os.path.join(os.path.split(logpath)[0], 'Datahandler.log'), mode='w')
fhandler = logging.FileHandler(filename=os.path.join(logpath, 'PyECoG_logfile.log'), mode='a+')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
lo | gger.addHandler(fhandler)
logger | .setLevel(logging.DEBUG) |
mikekap/batchy | tests/runloop_tests.py | Python | apache-2.0 | 9,494 | 0.002001 | import sys
from unittest.case import SkipTest
from batchy.compat import PY3
from batchy.local import RunLoopLocal
from batchy.runloop import coro_return, runloop_coroutine, deferred, future, current_run_loop, wait
from . import BaseTestCase
@runloop_coroutine()
def increment(arg):
coro_return(arg + 1)
yield
@runloop_coroutine()
def add_2(arg):
arg = yield increment(arg)
arg = yield increment(arg)
coro_return(arg)
yield
@runloop_coroutine()
def return_none():
coro_return(None)
yield
@runloop_coroutine()
def raise_value_error():
raise ValueError()
yield # pylint: disable-msg=W0101
@runloop_coroutine()
def block_loop(n):
"""Blocks the run loop for n iterations."""
d = yield deferred()
cnt = [n]
def unblock(_):
cnt[0] -= 1
if cnt[0] == 0:
d.set_value(1)
with current_run_loop().on_queue_exhausted.connected_to(unblock):
yield d
class RunLoopTests(BaseTestCase):
def test_simple_runnable(self):
self.assert_equal(1, increment(0))
self.assert_equal(2, increment(1))
def test_simple_runnable_py3(self):
if not PY3:
raise SkipTest()
exec("""
@runloop_coroutine()
def increment_py3(arg):
return arg + 1
yield
""", locals(), globals())
self.assert_equal(1, increment_py3(0))
self.assert_equal(2, increment_py3(1))
def test_dependencies(self):
self.assert_equal(2, add_2(0))
self.assert_equal(3, add_2(1))
def test_list_dependencies(self):
@runloop_coroutine()
def add_2_parallel(arg):
arg1, arg2 = yield increment(arg), increment(0)
coro_return(arg1+arg2)
self.assert_equal(2, add_2_parallel(0))
self.assert_equal(3, add_2_parallel(1))
def test_list_dependency_ordering(self):
result = []
@runloop_coroutine()
def append(x):
result.append(x)
yield
@runloop_coroutine()
def test():
yield [append(x) for x in range(100)]
test()
self.assert_equals(list(range(100)), result)
def test_dict_dependencies(self):
@runloop_coroutine()
def add_2_dict(arg):
d = yield {'a': increment(arg), 'b': increment(0), 'c': return_none()}
self.assert_equal(None, d['c'])
coro_return(d['a'] + d['b'])
self.assert_equal(2, add_2_dict(0))
self.assert_equal(3, add_2_dict(1))
def test_no_d | ependencies(self):
@run | loop_coroutine()
def coro():
yield
self.assert_equals(None, coro())
def test_local(self):
local = RunLoopLocal()
@runloop_coroutine()
def test():
local.hi = getattr(local, 'hi', 0) + 1
local.hello = 'boo'
del local.hello
coro_return((local.hi, getattr(local, 'hello', None)))
yield
def set_something():
local.hi = 1
self.assert_raises(RuntimeError, set_something)
self.assert_equals((1, None), test())
self.assert_equals((1, None), test())
def test_exception(self):
@runloop_coroutine()
def test(a):
try:
yield raise_value_error()
except ValueError:
v = yield increment(a)
coro_return(v)
self.assert_equals(2, test(1))
self.assert_equals(3, test(2))
def test_multiple_exception(self):
@runloop_coroutine()
def test(a):
err = raise_value_error()
try:
yield err, block_loop(1)
except ValueError:
v = yield increment(a)
coro_return(v)
self.assert_equals(2, test(1))
self.assert_equals(3, test(2))
def test_deferred_simple(self):
obj = [None]
@runloop_coroutine()
def task():
obj[0] = d = yield deferred()
if __debug__:
self.assert_raises(ValueError, d.get)
v = yield d
coro_return(v)
def set_value(_):
obj[0].set_value(3)
@runloop_coroutine()
def test():
with current_run_loop().on_queue_exhausted.connected_to(set_value):
v = yield task()
coro_return(v)
self.assert_equal(3, test())
def test_deferred_easy(self):
obj = [None]
@runloop_coroutine()
def task():
obj[0] = d = yield deferred()
d.set_value(3)
v = yield d
coro_return(v)
@runloop_coroutine()
def test():
v = yield task()
coro_return(v)
self.assert_equal(3, test())
def test_deferred_exception(self):
obj = [None]
@runloop_coroutine()
def task():
obj[0] = d = yield deferred()
v = yield d
coro_return(v)
def set_value(_):
try:
raise ValueError()
except ValueError:
obj[0].set_exception(*sys.exc_info())
@runloop_coroutine()
def test():
with current_run_loop().on_queue_exhausted.connected_to(set_value):
v = yield task()
coro_return(v)
@runloop_coroutine()
def test2():
with current_run_loop().on_queue_exhausted.connected_to(set_value):
x = test()
try:
yield x
except ValueError:
coro_return(1)
self.assert_raises(ValueError, test)
self.assert_equals(1, test2())
def test_block_loop(self):
total_iterations = [0]
def inc_total_iterations(_):
total_iterations[0] += 1
@runloop_coroutine()
def test():
with current_run_loop().on_iteration.connected_to(inc_total_iterations):
yield block_loop(1)
yield block_loop(1)
yield block_loop(1)
coro_return(1)
self.assert_equal(1, test())
self.assert_equal(total_iterations[0], 4-1) # the first loop isn't counted.
def test_future(self):
total_iterations = [0]
def inc_total_iterations(_):
total_iterations[0] += 1
@runloop_coroutine()
def test():
with current_run_loop().on_iteration.connected_to(inc_total_iterations):
v1 = yield future(block_loop(1))
v2 = yield future(block_loop(1))
v3 = yield future(block_loop(1))
self.assert_equal(0, total_iterations[0])
yield v1, v2, v3
coro_return(1)
self.assert_equal(1, test())
self.assert_equal(total_iterations[0], 2-1)
def test_future_exception(self):
total_iterations = [0]
def inc_total_iterations(_):
total_iterations[0] += 1
@runloop_coroutine()
def test():
with current_run_loop().on_iteration.connected_to(inc_total_iterations):
exc = yield future(raise_value_error())
v1 = yield future(block_loop(1))
v2 = yield future(block_loop(1))
v3 = yield future(block_loop(1))
self.assert_equal(0, total_iterations[0])
try:
yield exc
except ValueError:
self.assert_equal(0, total_iterations[0])
yield v1, v2, v3
coro_return(1)
self.assert_equal(1, test())
self.assert_equal(total_iterations[0], 2-1)
def test_future_exception_ignore(self):
@runloop_coroutine()
def test():
exc, _, _ = yield future(raise_value_error()), future(block_loop(1)), future(block_loop(1))
try:
yield exc
except ValueError:
raise
self.assert_raises(ValueError, test)
def test_ready_wait(self):
@runloop_coroutine()
def test():
d1, d2, d3 = yield deferred(), def |
Aurorastation/BOREALISbot2 | core/subsystems/__init__.py | Python | agpl-3.0 | 102 | 0.009804 | from .api import API, ApiMethods
from .config import Config
__all__ | = ["API", "ApiMethods", "C | onfig"] |
theju/django-comments-apps | recaptcha_comments/fields.py | Python | mit | 3,024 | 0.010913 | from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from d | jango.utils.translation import ugettext as _
import urllib2, urllib
VERIFY_SERVER="http://api-verify.recaptcha.net/verify"
class RecaptchaWidget(forms.Widget):
def render(self, name, value, attrs=None):
return mark_safe("""<script type="text/javascript"
src="http://api.recaptcha.net/challenge?k=%(public_key)s"></script>
<noscript>
<iframe src="http://api.recaptcha.net/no | script?k=%(public_key)s"
height="300" width="500" frameborder="0"></iframe><br>
<textarea name="recaptcha_challenge_field" rows="3" cols="40">
</textarea>
<input type="hidden" name="recaptcha_response_field" value="manual_challenge">
</noscript>""" %({'public_key': settings.RECAPTCHA_PUBLIC_KEY}))
def value_from_datadict(self, data, files, name):
return {
'recaptcha_challenge_field': data.get('recaptcha_challenge_field', None),
'recaptcha_response_field' : data.get('recaptcha_response_field', None),
'remoteip' : data.get('remoteip', None)
}
class RecaptchaField(forms.Field):
default_error_messages = {"unknown": _("Unknown error."),
"invalid-site-public-key": _("Unable to verify public key."),
"invalid-site-private-key": _("Unable to verify private key."),
"invalid-request-cookie": _("The challenge parameter was filled incorrectly."),
"incorrect-captcha-sol": _("Invalid Captcha solution."),
"verify-params-incorrect": _("Make sure you are passing all the required parameters."),
"invalid-referrer": _("Invalid Referrer. Enter the correct keys for this domain"),
"recaptcha-not-reachable": _("The reCaptcha site seems to be down. Sorry!!!")}
widget = RecaptchaWidget
def verify(self, data):
captcha_req = urllib2.Request(VERIFY_SERVER,
data=urllib.urlencode({'privatekey': settings.RECAPTCHA_PRIVATE_KEY,
'remoteip' : data['remoteip'],
'challenge' : data['recaptcha_challenge_field'],
'response' : data['recaptcha_response_field'],}))
try:
response = urllib2.urlopen(captcha_req)
except urllib2.URLError,e :
raise forms.ValidationError(e)
resp_content = response.readlines()
return_code = resp_content[0].strip()
error = resp_content[1].strip()
if not return_code == "true":
raise forms.ValidationError(self.error_messages.get(error) or error)
|
roy-boy/python_scripts | th_logger.py | Python | gpl-3.0 | 447 | 0.002237 | #!C:\Python27 | \
"" | "th_logger.py holds logging handler and config for the Regression test"""
import logging
from testProperty import TEST_OUTPUT_PATH
test_logger = logging.getLogger('TEST_HARNESS')
handler = logging.FileHandler(TEST_OUTPUT_PATH + 'runTest.log')
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-6s %(message)s')
handler.setFormatter(formatter)
test_logger.addHandler(handler)
test_logger.setLevel(logging.DEBUG)
|
quantopian/PenguinDome | client/submit.py | Python | apache-2.0 | 1,850 | 0 | #!/usr/bin/env python3
# Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import glob
import os
from requests.exceptions import HTTPError, ReadTimeout
import sys
from penguindome import top_dir, collected_dir, set_gpg
from penguindome.client import get_logger, server_request
log = get_logger('submit')
os.chdir(top_dir)
set_gpg('client')
for collected in sorted(glob.glob(os.path.join(collected_dir, '*[0-9]'))):
# This nested try/except is that we don't have to duplicate the code twice,
# one for unrecognized HTTPError exceptiosns and again for ReadTimeout.
try:
try:
server_request('/penguindome/v1/submit', da | ta_path=collected,
exit_on_connection_error=True, logger=log)
except HTTPError as e:
if e.response.status_code == 400:
log.error('Server returned status code 400. '
'Renaming {} to {}.bad.', collected, collected)
os.rename(collected, collected + '.bad')
sys.exit(1)
raise |
except (HTTPError, ReadTimeout) as e:
log.error('Submit failed: {}', str(e))
log.debug('Traceback of failed submission', exc_info=sys.exc_info())
sys.exit(1)
os.unlink(collected)
log.debug('Successful submission of {}', collected)
|
MartinPaulEve/CaSSius | src/interactive.py | Python | agpl-3.0 | 15,319 | 0.000979 | #!/usr/bin/env python
from __future__ import print_function
__author__ = "Martin Paul Eve"
__email__ = "martin@martineve.com"
"""
A class to handle an interactive prompt.
Portions of this file are Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""
from debug import Debuggable
import sys
from di | fflib import SequenceMatcher
import locale
class Interactive(Debuggable):
def __init__(self, debug):
self.debug = debug
Debuggable.__init__(self, 'Interactive Prompt Handler')
# ANSI terminal colorization code heavily inspired by pygments:
# http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py
# | (pygments is by Tim Hatch, Armin Ronacher, et al.)
self.COLOR_ESCAPE = "\x1b["
self.DARK_COLORS = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
self.LIGHT_COLORS = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
self.RESET_COLOR = self.COLOR_ESCAPE + "39;49;00m"
def input_options(self, options, require=False, prompt=None, fallback_prompt=None,
numrange=None, default=None, max_width=72):
"""Prompts a user for input. The sequence of `options` defines the
choices the user has. A single-letter shortcut is inferred for each
option; the user's choice is returned as that single, lower-case
letter. The options should be provided as lower-case strings unless
a particular shortcut is desired; in that case, only that letter
should be capitalized.
By default, the first option is the default. `default` can be provided to
override this. If `require` is provided, then there is no default. The
prompt and fallback prompt are also inferred but can be overridden.
If numrange is provided, it is a pair of `(high, low)` (both ints)
indicating that, in addition to `options`, the user may enter an
integer in that inclusive range.
`max_width` specifies the maximum number of columns in the
automatically generated prompt string.
"""
# Assign single letters to each option. Also capitalize the options
# to indicate the letter.
letters = {}
display_letters = []
capitalized = []
first = True
for option in options:
# Is a letter already capitalized?
for letter in option:
if letter.isalpha() and letter.upper() == letter:
found_letter = letter
break
else:
# Infer a letter.
for letter in option:
if not letter.isalpha():
continue # Don't use punctuation.
if letter not in letters:
found_letter = letter
break
else:
raise ValueError('no unambiguous lettering found')
letters[found_letter.lower()] = option
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if not require and ((default is None and not numrange and first) or
(isinstance(default, basestring) and
found_letter.lower() == default.lower())):
# The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
is_default = True
else:
show_letter = found_letter.upper()
is_default = False
# Colorize the letter shortcut.
show_letter = self.colorize('green' if is_default else 'red',
show_letter)
# Insert the highlighted letter back into the word.
capitalized.append(
option[:index] + show_letter + option[index + 1:]
)
display_letters.append(found_letter.upper())
first = False
# The default is just the first option if unspecified.
if require:
default = None
elif default is None:
if numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
# Make a prompt if one is not provided.
if not prompt:
prompt_parts = []
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = str(default)
default_name = self.colorize('turquoise', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % str(default)))
else:
prompt_parts.append('# selection')
prompt_part_lengths.append(len(prompt_parts[-1]))
prompt_parts += capitalized
prompt_part_lengths += [len(s) for s in options]
# Wrap the query text.
prompt = ''
line_length = 0
for i, (part, length) in enumerate(zip(prompt_parts,
prompt_part_lengths)):
# Add punctuation.
if i == len(prompt_parts) - 1:
part += '?'
else:
part += ','
length += 1
# Choose either the current line or the beginning of the next.
if line_length + length + 1 > max_width:
prompt += '\n'
line_length = 0
if line_length != 0:
# Not the beginning of the line; need a space.
part = ' ' + part
length += 1
prompt += part
line_length += length
# Make a fallback prompt too. This is displayed if the user enters
# something that is not recognized.
if not fallback_prompt:
fallback_prompt = 'Enter one of '
if numrange:
fallback_prompt += '%i-%i, ' % numrange
fallback_prompt += ', '.join(display_letters) + ':'
resp = self.input_(prompt)
while True:
resp = resp.strip().lower()
# Try default option.
if default is not None and not resp:
resp = default
# Try an integer input if available.
if numrange:
try:
resp = int(resp)
except ValueError:
pass
else:
low, high = numrange
if low <= resp <= high:
return resp
else:
resp = None
# Try a normal letter input.
if resp:
resp = resp[0]
if resp in letters:
return resp
# Prompt for new input.
resp = self.input_(fallback_prompt)
def input_(self, prompt=None):
"""Like `raw_input`, but decodes the result to a Unicode string.
Raises a UserError if stdin is not available. The prompt is sent to
stdout rather than stderr. A printed between the prompt and the
input cursor.
"""
# raw_input incorrectly sends prompts to stderr, not stdout, so we
# use |
wagtail/wagtail | wagtail/users/views/bulk_actions/__init__.py | Python | bsd-3-clause | 222 | 0.004505 | from .assign_role import AssignRoleBulkAction
from .delete import DeleteBulkAction
from .set_active_state import SetActiveStateBul | kAction
__all__ = ["AssignRoleBulkAction", " | DeleteBulkAction", "SetActiveStateBulkAction"]
|
SaintAttila/attila | attila/db/adodb.py | Python | mit | 18,695 | 0.002461 | """
ADODB database interface for Python
"""
# TODO: Map database to the file system interface. The FS interface already supports row-based
# reading and writing via the read_rows(), load_rows(), and save_rows() methods. However,
# those methods expect quotes and delimiters, which aren't required for row-based data stores
# like SQL tables. Maybe they can simply ignore their delimiter and quote parameters? The
# question then becomes, what about URLs and SQL queries? There is no doubt that the SQL table
# and delimited file paradigms can be mapped to each other. But how far do we take it, and
# how complicated is it going to get?
# TODO: Migrate off of COM, if possible.
import win32com.client
from ..abc import configurations
from ..abc import sql
from ..abc import transactions
from ..configurations import ConfigManager
from ..exceptions import verify_type
from ..plugins import config_loader
from ..security import credentials
__author__ = 'Aaron Hosford'
__all__ = [
'ADODBRecordSet',
'ADODBConnector',
'adodb_connection',
]
ADODB_CONNECTION_COM_CLASS_NAME = "ADODB.Connection"
DEFAULT_DRIVER = 'SQL Server'
# TODO: Add MySQL and other common and supported driver/dialect pairs.
DRIVER_DIALECT_MAP = {
'sql server': 'T-SQL',
}
class Constants:
"""
Microsoft-defined constants for use with ADODB. These have the original names (as ugly as they
are) preserved for Googling convenience. They are not meant to be exported as part of this
module's public interface and should only be used here within this module.
"""
# Cursor locations
adUseNone = 1
adUseServer = 2 # Default
adUseClient = 3
adUseClientBatch = 3
# Cursor types
adOpenUnspecified = -1
adOpenForwardOnly = 0 # Default; does not allow use of transactions. AKA "fire hose mode".
adOpenKeySet = 1
adOpenDynamic = 2
adOpenStatic = 3
# Object states
adStateClosed = 0
adStateOpen = 1
adStateConnecting = 2
adStateExecuting = 4
adStateFetching = 8
class ADODBRecordSet(sql.RecordSet):
"""
An ADODBRecordSet is returned whenever a query is executed. It provides an interface to the
selected data.
"""
def __init__(self, com_object):
self._com_object = com_object
def _next(self):
# It looks dead wrong, but it has to be this way. The COM object's interface is broken.
if self._com_object.EOF or self._com_object.BOF:
raise StopIteration()
# We should never expose the raw COM object at all. Grab the values out and put them in a
# tuple instead.
result = tuple(field.Value for field in self._com_object.Fields)
self._com_object.MoveNext()
return result
@config_loader
class ADODBConnector(sql.SQLConnector, configurations.Configurable):
"""
Stores the ADODB connection information for a database as a single object which can then be
passed around instead of using multiple parameters to a function. Use str(connector) to get the
actual connection string.
"""
@staticmethod
def _tokenize(string_value):
# TODO: docstring. also, can this be a regex?
token = ''
in_braces = False
for char in string_value:
if in_braces:
if char == '}':
in_braces = False
yield token
token = ''
else:
token += char
elif char == '{':
if token:
yield token
token = ''
in_braces = True
elif char in ('=', ';'):
if token:
yield token
token = ''
yield char
else:
token += char
assert not in_braces
if token:
yield token
@classmethod
def _parse(cls, string_value):
# TODO: docstring
key = None
equals = False
value = None
results = {}
for token in cls._tokenize(string_value):
if token == '=':
assert key and not equals and value is None
equals = True
elif token == ';':
assert key and equals and value
key = key.lower()
assert key not in results
results[key] = value
key = None
equals = False
value = None
elif key is None:
assert not equals and value is None
key = token
else:
assert key and equals and value is None
value = token
if key is not None:
assert key and equals and value
key = key.lower()
assert key not in results
results[key] = value
return results
@classmethod
def load_config_value(cls, manager, value, *args, **kwargs):
"""
Load a class instance from the value of a config option.
:param manager: A ConfigManager instance.
:param value: The string value of the option.
:return: A new instance of this class.
"""
verify_type(manager, ConfigManager)
assert isinstance(manager, ConfigManager)
verify_type(value, str, non_empty=True)
parameter_map = cls._parse(value)
# We specifically disallow passwords to be stored. It's a major security risk.
assert 'password' not in parameter_map and 'pwd' not in parameter_map
# We also want to catch any other, unfamiliar terms.
for key in parameter_map:
if key not in {'server', 'database', 'driver', 'trusted_connection', 'uid', 'dialect'}:
raise KeyError("Unrecognized term: " + repr(key))
server = parameter_map['server']
database = parameter_map['database']
driver = parameter_map.get('driver')
trusted = parameter_map.get('trusted_connection')
dialect = parameter_map.get('dialect')
if trusted is not None:
trusted = trusted.lower()
assert trusted in ('true', 'false')
trusted = (trusted == 'true')
if trusted:
credential = None
else:
user = parameter_map.get('uid')
if user is not None:
credential_string = user + '@' + server + '/adodb'
credential = manager.load_value(credential_string, credentials.Credential)
else:
credential = None
return cls(
*args,
server=server,
database=database,
driver=driver,
credential=credential,
| trusted=trusted,
dialect=dialect,
**kwargs
)
@classmethod
def load_config_section(cls, manager, section, *args, **kwargs):
"""
Load a class instance from a config section.
:param manager: A ConfigManager instance.
:param section: The name of the section.
:return: A new instance of this class.
"""
verify_type(mana | ger, ConfigManager)
assert isinstance(manager, ConfigManager)
verify_type(section, str, non_empty=True)
server = manager.load_option(section, 'server', str)
database = manager.load_option(section, 'database', str)
driver = manager.load_option(section, 'driver', str, default=None)
trusted = manager.load_option(section, 'trusted', 'bool', default=None)
dialect = manager.load_option(section, 'dialect', str, default=None)
if trusted:
credential = None
else:
credential = manager.load_option(section, 'credential',
credentials.Credential,
default=None)
if credential is None:
credential = manager.load_section(section,
loader=credentials.Credential,
|
PKU-Cloud-Lab/xLearn | python-package/xlearn/base.py | Python | apache-2.0 | 2,861 | 0.002097 | # Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from .libpath import find_lib_path
class XLearnError(Exception):
"""Error thrown by xlearn trainer"""
pass
def _load_lib():
"""Load xlearn shared library"""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
return lib
# load the xlearn library globally
_LIB = _load_lib()
def _check_call(ret):
"""Check the return value of C API call
This function will raise exception when error occurs.
Wrap every API call with this function
Parameters
----------
ret : int
return value from API calls
"""
if ret != 0:
msg = ""
# raise XLearnError()
_LIB.XLearnGetLastError.restype = ctypes.POINTER(ctypes.c_ubyte)
ptr = _LIB.XLearnGetLastError()
idx = 0
while(ptr[idx] != 0):
msg += chr(ptr[idx])
idx += 1
raise XLearnError | (msg)
# type definitions
XLearnHandle = ctypes.c_void_p
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Pyrhon string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
| >>> print x.value
Hello, world!
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Pyrhon string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = c_str("Hello, world!")
>>> print(x.value)
Hello, world!
"""
return ctypes.c_char_p(string.encode('utf-8'))
"""pandas"""
try:
from pandas import Series, DataFrame
except ImportError:
class Series(object):
"""Dummy class for pandas.Series."""
pass
class DataFrame(object):
"""Dummy class for pandas.DataFrame."""
pass
|
mljar/mljar-api-python | mljar/model/result.py | Python | apache-2.0 | 2,694 | 0.003712 | from marshmallow import Schema, fields, post_load
from .base import BaseModel
class ResultSchema(Schema):
hid = fields.Str()
experiment = fields.Str()
dataset = fields.Str()
validation_scheme = fields.Str()
model_type = fields.Str()
metric_type = fields.Str()
metric_value = fields.Number(allow_none=True)
run_time = fields.Number(allow_none=True)
iters = fields.Number(allow_none=True)
status = fields.Str()
status_detail = fields.Str(allow_none=True)
status_modify_at = fields.DateTime()
importance = fields.Dict(allow_none=True)
train_prediction_path = fields.Str(allow_none=True)
params = fields.Dict(allow_none=True)
train_details = fields.Dict(allow_none=True)
models_saved = fields.Str(allow_none=True)
metric_additional = fields.Dict(allow_none=True)
@post_load
def make_result_instance(self, data):
return Result(**data)
class Result(BaseModel):
    """A single model-training result returned by the MLJAR API.

    Wraps the JSON payload (deserialized via ``ResultSchema``) in an
    attribute-per-field object and provides a human-readable summary.
    """
    # Shared (de)serialization schema for Result payloads.
    schema = ResultSchema(strict=True)

    def __init__(self, hid, experiment, dataset, validation_scheme, model_type, metric_type,
                 params, status, status_detail=None, status_modify_at=None, metric_value=None,
                 importance=None, train_prediction_path=None, run_time=None, iters=None, train_details=None,
                 metric_additional=None, models_saved=None):
        # Identifiers and provenance.
        self.hid = hid
        self.experiment = experiment
        self.dataset = dataset
        self.validation_scheme = validation_scheme
        self.model_type = model_type
        # Evaluation metric and measured performance (None until computed).
        self.metric_type = metric_type
        self.metric_value = metric_value
        self.run_time = run_time
        self.iters = iters
        # Training lifecycle status.
        self.status = status
        self.status_detail = status_detail
        self.status_modify_at = status_modify_at
        # Optional artifacts produced by training.
        self.importance = importance
        self.train_prediction_path = train_prediction_path
        self.params = params
        self.train_details = train_details
        self.models_saved = models_saved
        self.metric_additional = metric_additional

    def __str__(self):
        desc = 'Result id: {} model: {} status: {}\n'.format(self.hid, self.model_type, self.status)
        desc += 'Performance: {} on {} with {}\n'.format(str(self.metric_value), self.metric_type, self.validation_scheme)
        return desc
|
xrootd/xrootd-test-framework | src/XrdTest/EmailNotifier.py | Python | gpl-3.0 | 7,244 | 0.007454 | #!/usr/bin/env python
#-------------------------------------------------------------------------------
#
# Copyright (c) 2011-2012 by European Organization for Nuclear Research (CERN)
# Author: Justin Salmon <jsalmon@cern.ch>
#
# This file is part of XrdTest.
#
# XrdTest is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# XrdTest is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XrdTest. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
#
# File: EmailNotifier.py
# Desc: Functionality for sending email notifications to a set of email
# addresses in case of test suite success/failure, based on policies
# about the frequency and type of notifications desired.
#
#-------------------------------------------------------------------------------
from Utils import Logger
LOGGER = Logger(__name__).setup()
try:
import sys
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
except ImportError, e:
LOGGER.error(str(e))
sys.exit(1)
class EmailNotifierException(Exception):
    """General exception raised by EmailNotifier.

    Carries a human-readable description of what went wrong.
    """

    def __init__(self, desc):
        """Store *desc*, the textual description of the error."""
        self.desc = desc

    def __str__(self):
        """Render the stored description via its repr."""
        return "%r" % (self.desc,)
class EmailNotifier(object):
    '''
    Sends success/failure notification emails for test suite and test case
    events, honouring per-outcome alert policies that control how often
    mail is sent.
    '''
    # Alert policies: notify on every case, only at suite level, or never.
    POLICY_CASE = "CASE"
    POLICY_SUITE = "SUITE"
    POLICY_NONE = "NONE"

    # Event types passed as ``type`` to the notify methods.
    SUITE_EVENT = 0
    CASE_EVENT = 1
    TIMEOUT_EVENT = 3

    SENDER = 'XRootD Testing Framework <master@xrd.test>'

    def __init__(self, emails, success_policy, failure_policy):
        '''
        @param emails: list of recipient email addresses
        @param success_policy: one of the POLICY_* constants for successes
        @param failure_policy: one of the POLICY_* constants for failures
        '''
        self.emails = emails
        self.success_policy = success_policy
        self.failure_policy = failure_policy

    def notify_success(self, args, desc, type):
        '''Send a success notification if the success policy allows it.'''
        self._notify(self.success_policy, args, desc, type, 'success')

    def notify_failure(self, args, desc, type):
        '''Send a failure notification if the failure policy allows it.'''
        self._notify(self.failure_policy, args, desc, type, 'failure')

    def _notify(self, policy, args, desc, type, label):
        '''Shared policy check and dispatch for both notification kinds.'''
        if policy == self.POLICY_NONE:
            return
        if policy == self.POLICY_CASE:
            send = True
        elif policy == self.POLICY_SUITE:
            # Suite-level policy only fires on suite or timeout events.
            send = type in (self.SUITE_EVENT, self.TIMEOUT_EVENT)
        else:
            LOGGER.error('Invalid %s alert policy: %s' % (label, policy))
            return
        if send:
            msg = self._build(args, desc, type)
            self._send(msg, self.emails)

    def _build(self, args, desc, type):
        '''Assemble the multipart (plain + HTML) notification message.

        NOTE(review): mutates *args* in place, replacing raw values with
        pre-rendered HTML fragments before template substitution.
        '''
        en = EmailNotification()
        # Create message container - correct MIME type is multipart/alternative.
        msg = MIMEMultipart('alternative')
        subject = '%s (suite: %s%s%s)' % (desc, args['testsuite'], \
                  ' test case: ' + args['testcase'] if args['testcase'] else '',
                  ' slave: ' + args['slave'] if args['slave'] else '')
        msg['Subject'] = subject % args
        msg['From'] = self.SENDER
        # Only list failed cases when this notification reports a failure.
        if args['failed_cases'] and int(args['failure']):
            args['failed_cases'] = 'Failed test cases: <strong>' + \
                        ', '.join([c.name for c in args['failed_cases']]) + \
                        '</strong>'
        else:
            args['failed_cases'] = ''
        # 'failure' arrives as a truthy/falsy flag; render it as a word.
        if int(args['failure']):
            args['failure'] = 'Failure'
        else:
            args['failure'] = 'Success'
        if args['testcase']:
            args['testcase'] = 'Test case: <strong>' + \
                               args['testcase'] + '</strong><br />'
        if args['slave']:
            args['slave'] = 'Slave: <strong>' + \
                            args['slave'] + '</strong><br />'
        if args['result']:
            args['result'] = 'Output from slave: <br /><strong>' + \
                             args['result'] + '</strong><br />'
        args.update({'desc': desc, 'css': en.css})
        text = en.body_text % args
        html = en.body_html % args
        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(text, 'plain')
        part2 = MIMEText(html, 'html')
        # Attach parts into message container.
        msg.attach(part1)
        msg.attach(part2)
        return msg

    def _send(self, msg, recipients):
        '''Deliver *msg* to *recipients* via the local SMTP server.

        Delivery errors are logged, not raised: notification failure must
        not abort the test run.
        '''
        msg['To'] = ', '.join(recipients)
        try:
            s = smtplib.SMTP('localhost')
            s.sendmail(self.SENDER, recipients, msg.as_string())
            s.quit()
        except Exception as e:
            LOGGER.error('Cannot send notification email: %s' % str(e))
class EmailNotification(object):
    """String templates (plain text, HTML, CSS) for notification emails.

    Each property returns a %-style template keyed by the args dict that
    EmailNotifier._build prepares (testsuite, desc, time, slave, testcase,
    failed_cases, result, css).
    """
    # Plain-text body used for the text/plain MIME part.
    @property
    def body_text(self):
        return \
        """
        Test suite: %(testsuite)s
        Description: %(desc)s
        Time: %(time)s
        %(slave)s
        %(testcase)s
        %(failed_cases)s
        %(result)s
        """
    # HTML body used for the text/html MIME part; embeds the css property.
    @property
    def body_html(self):
        return \
        """
        <html>
        <head>
        <style type="text/css">%(css)s</style>
        </head>
        <body>
        <p>
        Test suite: <strong>%(testsuite)s</strong>
        <br />
        Description: <strong>%(desc)s</strong>
        <br />
        Time: <strong>%(time)s</strong>
        <br />
        %(slave)s
        %(testcase)s
        %(failed_cases)s
        </p>
        <p>
        <code>%(result)s</code>
        </p>
        </body>
        </html>
        """
    # Inline CSS injected into the HTML body above.
    @property
    def css(self):
        return \
        """
        html,body,div,span,h1,h2,h3,h4,h5,h6,p,code,em,small,strong,i {
        font-size: 100%;
        font-family: Courier New, Courier, monospace;
        }
        code {
        white-space: pre-wrap;
        }
        """
|
morelab/labman_ud | labman_ud/extractors/zotero/admin.py | Python | gpl-3.0 | 970 | 0.008247 | # -*- encoding: utf-8 -*-
from django.contrib import admin
from .models import ZoteroExtractorLog
### ZoteroExtractorLogAdmin
####################################################################################################
class ZoteroExtractorLogAdmin(admin.ModelAdmin):
    """Django admin configuration for ZoteroExtractorLog records."""
    model = ZoteroExtractorLog
    # Columns shown in the admin changelist view.
    list_display = ['item_key', 'version', 'timestamp', 'publication']
    # Fields matched by the admin search box (follows the FK to publication).
    search_fields = ['item_key', 'version', 'publication__title', 'publication__slug']
####################################################################################################
####################################################################################################
### Register classes
####################################################################################################
#################################### | ################################################################
admin.site.register(ZoteroExtractorLog, Zote | roExtractorLogAdmin)
|
yfpeng/pengyifan-pybioc | tests/bioc/test_json_encoder.py | Python | bsd-3-clause | 1,888 | 0.00053 | import io
import tempfile
from pathlib import Path
import pytest
from bioc import BioCFileType
from bioc.biocjson.encoder import toJSON
import bioc
from bioc.biocjson import BioCJsonIterWriter
from tests.utils import assert_everything
file = Path(__file__).parent / 'everything.json'
def test_dump():
with open(file, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
tmp = tempfile.mktemp()
with open(tmp, 'w', encoding='utf8') as fp:
bioc.dump(collection, fp, BioCFileType.BIOC_JSON)
with open(tmp, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
assert_everything(collection)
def test_dumps():
with open(file, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
s = bioc.dumps(collection, BioCFileType.BIOC_JSON)
collection = bioc.loads(s, BioCFileType.BIOC_JSON)
assert_everything(collection)
def test_level():
    """Invalid or mismatched ``level`` values must raise ValueError."""
    # A level outside the known constants is rejected at construction time.
    with pytest.raises(ValueError):
        BioCJsonIterWriter(io.StringIO(), level=-1)
    with open(file, encoding='utf8') as fp:
        collection = bioc.load(fp, BioCFileType.BIOC_JSON)
    # Writing an object whose type does not match the writer's level fails:
    # a document into a sentence-level writer...
    with pytest.raises(ValueError):
        writer = BioCJsonIterWriter(io.StringIO(), level=bioc.SENTENCE)
        writer.write(collection.documents[0])
    # ...a document into a passage-level writer...
    with pytest.raises(ValueError):
        writer = BioCJsonIterWriter(io.StringIO(), level=bioc.PASSAGE)
        writer.write(collection.documents[0])
    # ...and a passage into a document-level writer.
    with pytest.raises(ValueError):
        writer = BioCJsonIterWriter(io.StringIO(), level=bioc.DOCUMENT)
        writer.write(collection.documents[0].passages[0])
def test_toJSON():
with open(file, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
obj = toJSON(collection)
assert obj['documents'][0]['id'] == '1'
with pytest.raises(TypeError):
toJSON({})
|
lucemia/gcloud-python | gcloud/datastore/batch.py | Python | apache-2.0 | 10,313 | 0 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with a batch of updates / deletes."""
from gcloud._localstack import _LocalStack
from gcloud.datastore import _implicit_environ
from gcloud.datastore import helpers
from gcloud.datastore.key import _dataset_ids_equal
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
_BATCHES = _LocalStack()
class Batch(object):
"""An abstraction representing a collected group of updates / deletes.
Used to build up a bulk mutuation.
For example, the following snippet of code will put the two ``save``
operations and the delete operatiuon into the same mutation, and send
them to the server in a single API request::
>>> from gcloud.datastore.batch import Batch
>>> batch = Batch()
>>> batch.put(entity1)
>>> batch.put(entity2)
>>> batch.delete(key3)
>>> batch.commit()
You can also use a batch as a context manager, in which case the
``commit`` will be called automatically if its block exits without
raising an exception::
>>> with Batch() as batch:
... batch.put(entity1)
... batch.put(entity2)
... batch.delete(key3)
By default, no updates will be sent if the block exits with an error::
>>> from gcloud import datastore
>>> dataset = datastore.get_dataset('dataset-id')
>>> with Batch() as batch:
... do_some_work(batch)
... raise Exception() # rolls back
"""
def __init__(self, dataset_id=None, connection=None):
""" Construct a batch.
:type dataset_id: :class:`str`.
:param dataset_id: The ID of the dataset.
:type connection: :class:`gcloud.datastore.connection.Connection`
:param connection: The connection used to connect to datastore.
:raises: :class:`ValueError` if either a connection or dataset ID
are not set.
"""
self._connection = connection or _implicit_environ.CONNECTION
self._dataset_id = dataset_id or _implicit_environ.DATASET_ID
if self._connection is None or self._dataset_id is None:
raise ValueError('A batch must have a connection and '
'a dataset ID set.')
self._mutation = datastore_pb.Mutation()
self._auto_id_entities = []
@staticmethod
def current():
"""Return the topmost batch / transaction, or None."""
return _BATCHES.top
@property
def dataset_id(self):
"""Getter for dataset ID in which the batch will run.
:rtype: :class:`str`
:returns: The dataset ID in which the batch will run.
"""
return self._dataset_id
@property
def connection(self):
"""Getter for connection over which the batch will run.
:rtype: :class:`gcloud.datastore.connection.Connection`
:returns: The connection over which the batch will run.
"""
return self._connection
@property
def mutation(self):
"""Getter for the current mutation.
Every batch is committed with a single Mutation
representing the 'work' to be done as part of the batch.
Inside a batch, calling ``batch.put()`` with an entity, or
``batch.delete`` with a key, builds up the mutation.
This getter returns the Mutation protobuf that
has been built-up so far.
:rtype: :class:`gcloud.datastore._datastore_v1_pb2.Mutation`
:returns: The Mutation protobuf to be sent in the commit request.
"""
return self._mutation
def add_auto_id_entity(self, entity):
"""Adds an entity to the list of entities to update with IDs.
When an entity has a partial key, calling ``save()`` adds an
insert_auto_id entry in the mutation. In order to make sure we
update the Entity once the transaction is committed, we need to
keep track of which entities to update (and the order is
important).
When you call ``save()`` on an entity inside a transaction, if
the entity has a partial key, it adds itself to the list of
entities to be updated once the transaction is committed by
calling this method.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: The entity to be updated with a completed key.
:raises: ValueError if the entity's key is alread completed.
"""
if not entity.key.is_partial:
raise ValueError("Entity has a completed key")
self._auto_id_entities.append(entity)
def put(self, entity):
"""Remember an entity's state to be saved during ``commit``.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: ValueError if entity has no key assigned, or if the key's
` | `dataset_id`` does not match ours.
"""
if entity.key is None:
raise ValueError("Entity mus | t have a key")
if not _dataset_ids_equal(self._dataset_id, entity.key.dataset_id):
raise ValueError("Key must be from same dataset as batch")
_assign_entity_to_mutation(
self.mutation, entity, self._auto_id_entities)
    def delete(self, key):
        """Remember a key to be deleted during ``commit``.

        :type key: :class:`gcloud.datastore.key.Key`
        :param key: the key to be deleted.

        :raises: ValueError if key is not complete, or if the key's
                 ``dataset_id`` does not match ours.
        """
        if key.is_partial:
            raise ValueError("Key must be complete")
        if not _dataset_ids_equal(self._dataset_id, key.dataset_id):
            raise ValueError("Key must be from same dataset as batch")
        key_pb = key.to_protobuf()
        # Deletion is expressed by appending the key protobuf to the
        # mutation's ``delete`` list; nothing is sent until ``commit``.
        helpers._add_keys_to_request(self.mutation.delete, [key_pb])
def begin(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
pass
def commit(self):
"""Commits the batch.
This is called automatically upon exiting a with statement,
however it can be called explicitly if you don't want to use a
context manager.
"""
response = self.connection.commit(self._dataset_id, self.mutation)
# If the back-end returns without error, we are guaranteed that
# the response's 'insert_auto_id_key' will match (length and order)
# the request's 'insert_auto_id` entities, which are derived from
# our '_auto_id_entities' (no partial success).
for new_key_pb, entity in zip(response.insert_auto_id_key,
self._auto_id_entities):
new_id = new_key_pb.path_element[-1].id
entity.key = entity.key.completed_key(new_id)
def rollback(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
pass
def __enter__(self):
_BATCHES.push(self)
self.begi |
vrsys/avangong | examples/avango-utils/load_pcd.py | Python | lgpl-3.0 | 3,367 | 0.005346 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2010 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribu | te it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public | License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango.osg.viewer
import avango.moving
import avango.tools
import avango.display
import os
import sys
import getopt
import sys
#options
short_options="h"
long_options=["pcd="]
#initialize the display
args = avango.display.init(sys.argv, short_options, long_options)
view = avango.display.make_view()
view.EnableTrackball.value = True
#default model
filename = ""
#parse options
try:
opts, args = getopt.getopt(args, short_options, long_options)
except getopt.GetoptError, err:
print "error: wrong arguments"
opts = [("-h", "")]
for opt, arg in opts:
if opt in ("-h", "--help"):
print "Options:"
print " --pcd=<filename> Specify the pcd file to be loaded"
exit(0)
elif opt in ("--pcd"):
filename = arg
if filename == "":
print "No pcd file given. Use --pcd=<filename>. Exit."
sys.exit()
#check if the model exists
elif not os.path.exists(filename):
print "File: " + str(filename) + " does not exist. Exit."
sys.exit(0)
#create mouse device
mouse = avango.display.make_dominant_user_device(interface="Mouse")
# set up scene graph
view.Root.value = avango.osg.nodes.Group()
#geode = avango.utils.load_pcd_file(filename)
#obj_trans.Children.value.append(geode)
#load the points
pcd_cloud = avango.utils.nodes.PCLPointCloud(Filename=filename)
#create a geometry and connect the points and colors
geometry = avango.osg.nodes.Geometry()
geometry.VertexArray.connect_from(pcd_cloud.Points)
geometry.ColorArray.connect_from(pcd_cloud.Colors)
geometry.ColorBinding.value = 4 # per vertex
geometry.Mode.value = 0 # points
#add the geometry into a geode and add it to the scenegraph
geode = avango.osg.nodes.Geode(Drawables=[geometry])
view.Root.value.Children.value.append(geode)
# run evaluation and render loop
avango.display.run()
|
Rahul91/CommunityNewsPortal | manage.py | Python | mit | 253 | 0 | #!/usr/bin/env p | ython
import os
import sys
if __name__ == "__main__":
    # Point Django at the project settings before dispatching the command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NewsPortal.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
MingfeiPan/leetcode | dp/72.py | Python | apache-2.0 | 859 | 0.003492 | class Solution:
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
l1 = len(word1)
l2 = len(word2)
if l1 == 0 and | l2 != 0:
return l2
if l1 != 0 and l2 == 0:
return l1
matrix = [[0]*(l2+1) for i in range(0, l1+1)]
for i in range(0, l1+1):
for j in range(0, l2+1):
if i == 0:
matrix[i][j] = j
elif j == 0:
matrix[i][j] = i
else:
if word1[i-1] == word2[j-1]:
| matrix[i][j] = matrix[i-1][j-1]
else:
matrix[i][j] = min(matrix[i][j-1], matrix[i-1][j], matrix[i-1][j-1]) + 1
return matrix[l1][l2]
|
anhstudios/swganh | data/scripts/templates/object/ship/shared_blacksun_medium_s03_tier2.py | Python | mit | 422 | 0.049763 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### M | ODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Factory for the Black Sun medium ship (variant s03, tier 2) template."""
	result = Ship()
	result.template = "object/ship/shared_blacksun_medium_s03_tier2.iff"
	result.attribute_template_id = -1
	result.stfName("","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
junghans/espressopp | src/analysis/AnalysisBase.py | Python | gpl-3.0 | 3,450 | 0.015362 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************************************************************
**AnalysisBase** - abstract base class for analysis/measurement
*****************************************************************
This abstract base class provides the interface and some basic
functionality for classes that do analysis or observable measurements
It provides the following methods:
.. function:: espressopp.analysis.AnalysisBase.compute()
Computes the instant value of the observable.
:rtype: a python list or a scalar
.. function:: espressopp.analysis.AnalysisBase.getAverageValue()
Returns the average value for the observable and the standard deviation.
:rtype: a python list
.. function:: espressopp.analysis.AnalysisBase.getNumberOfMeasurements()
counts the number of measurements that have been performed (standalone or in integrator)
does _not_ include measurements that have been done using "compute()"
:rtype:
.. function:: espressopp.analysis.AnalysisBase.performMeasure | ment()
Computes the observable and updates average and standard deviation
:rtype:
.. function:: espressopp.analysis.AnalysisBase.reset()
Resets average and standard deviation
:rtype:
"""
from espressopp import pmi
from espressopp.ParticleAccess import *
from _espressopp import analysis_AnalysisBase
class AnalysisBaseLocal(ParticleAccessLocal, analysis_AnalysisBase):
def performMeasurement(self):
if not pmi._PMIComm or pmi._MPIco | mm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.performMeasurement(self)
def reset(self):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.reset(self)
def compute(self):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
res = self.cxxclass.compute(self)
if len(res) > 1:
return res
else:
return res[0]
def getAverageValue(self):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getAverageValue(self)
def getNumberOfMeasurements(self):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getNumberOfMeasurements(self)
if pmi.isController :
class AnalysisBase(ParticleAccess):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
pmicall = [ "performMeasurement", "reset", "compute", "getAverageValue", "getNumberOfMeasurements" ]
)
|
dnjohnstone/hyperspy | hyperspy/drawing/_widgets/vertical_line.py | Python | gpl-3.0 | 1,544 | 0 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Y | ou should | have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from hyperspy.drawing.widgets import Widget1DBase
from hyperspy.drawing.utils import picker_kwargs
class VerticalLineWidget(Widget1DBase):
    """A draggable, vertical line widget.
    """
    def _update_patch_position(self):
        # Move the existing axvline to the widget's current position.
        if self.is_on() and self.patch:
            self.patch[0].set_xdata(self._pos[0])
            self.draw_patch()
    def _set_patch(self):
        # Create the matplotlib artist: a pickable vertical line.
        ax = self.ax
        kwargs = picker_kwargs(5)
        self.patch = [ax.axvline(self._pos[0],
                                 color=self.color,
                                 alpha=self.alpha,
                                 **kwargs)]
    def _onmousemove(self, event):
        """on mouse motion draw the cursor if picked"""
        # Only follow the mouse while the widget is picked and the
        # pointer is inside the axes.
        if self.picked is True and event.inaxes:
            self.position = (event.xdata,)
|
chirilo/mozillians | vendor-local/lib/python/celery/task/__init__.py | Python | bsd-3-clause | 2,664 | 0 | # -*- coding: utf-8 -*-
"""
celery.task
~~~~~~~~~~~
Creating tasks, subtasks, sets and chords.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from ..app import app_or_default, current_task as _current_task
from ..local import Proxy
from .base import Task, PeriodicTask # noqa
from .sets import group, TaskSet, subtask # noqa
from .chords import chord | # noqa
from .control import discard_all # noqa
current = Proxy(_current_task)
def task(*args, **kwargs):
"""Decorator to create a task class out of any callable.
**Examples**
.. code-block:: python
@task
def refresh_feed(url):
return Feed.objects.get(url=url).refresh()
With setting extra options and using retry.
.. code-block:: python
@task(max_retries=10)
def refresh_feed(url):
| try:
return Feed.objects.get(url=url).refresh()
except socket.error, exc:
refresh_feed.retry(exc=exc)
Calling the resulting task:
>>> refresh_feed("http://example.com/rss") # Regular
<Feed: http://example.com/rss>
>>> refresh_feed.delay("http://example.com/rss") # Async
<AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
"""
kwargs.setdefault("accept_magic_kwargs", False)
return app_or_default().task(*args, **kwargs)
def periodic_task(*args, **options):
    """Decorator to create a periodic task class out of any callable.

    Same as :func:`task`, but the resulting task uses
    :class:`PeriodicTask` as its base class.

    .. admonition:: Examples

        .. code-block:: python

            @periodic_task
            def refresh_feed(url):
                return Feed.objects.get(url=url).refresh()

        With setting extra options and using retry.

        .. code-block:: python

            from celery.task import current

            @periodic_task(exchange="feeds")
            def refresh_feed(url):
                try:
                    return Feed.objects.get(url=url).refresh()
                except socket.error, exc:
                    current.retry(exc=exc)

        Calling the resulting task:

            >>> refresh_feed("http://example.com/rss") # Regular
            <Feed: http://example.com/rss>
            >>> refresh_feed.delay("http://example.com/rss") # Async
            <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>

    """
    return task(**dict({"base": PeriodicTask}, **options))
@task(name="celery.backend_cleanup")
def backend_cleanup():
backend_cleanup.backend.cleanup()
|
stgraber/snapcraft | snapcraft/internal/states/__init__.py | Python | gpl-3.0 | 947 | 0 | # -*- Mode:Python; indent | -tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute | it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft.internal.states._prime_state import PrimeState # noqa
from snapcraft.internal.states._stage_state import StageState # noqa
from snapcraft.internal.states._build_state import BuildState # noqa
from snapcraft.internal.states._pull_state import PullState # noqa
|
gwsu2008/automation | python/awscreds-custom.py | Python | gpl-2.0 | 2,552 | 0.002351 | #!/usr/local/bin/python3
import sys
import boto3
import os
from botocore.exceptions import ClientError
import json
import argparse
from botocore.utils import InstanceMetadataFetcher
from botocore.credentials import InstanceMetadataProvider
import platform
region = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
duration = int(os.getenv('AWS_CLIENT_DURATION', 7200))
aws_access_key_id = os.getenv('CI_AWS_ACCESS_KEY_ID', None)
aws_secret_access_key = os.getenv('CI_AWS_SECRET_ACCESS_KEY', None)
session_name = 'jenkkins'
parser = argparse.ArgumentParser(description='AWS creds custom')
parser.add_argument('--role-arn', '-r', default=None, dest='role_arn', help='AWS IAM role arn for temp session token.')
args, unknown = parser.parse_known_args()
role_arn = args.role_arn
if role_arn is not None:
try:
provider = InstanceMetadataProvider(iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=5))
_creds = provider.load()
temp_session = boto3.Session(
aws_access_key_id=_creds.access_key, aws_secret_access_key=_creds.secret_key, aws_session_token=_creds.token)
sts_client = temp_session.client("sts", region_name=region | )
params = {"RoleArn": role_arn, "RoleSessionName": session_name, "DurationSeconds": duration,}
response = sts_client.assume_role(**params).get("Credentials")
cred = | {
"Version": 1,
"AccessKeyId": response.get("AccessKeyId"),
"SecretAccessKey": response.get("SecretAccessKey"),
"SessionToken": response.get("SessionToken"),
"Expiration": response.get("Expiration").isoformat(),
}
except ClientError as ex:
sys.exit(255)
else:
if aws_access_key_id is None or aws_secret_access_key is None:
sys.exit(255)
try:
params = {"aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "region_name": region}
temp_session = boto3.Session(**params)
sts_client = temp_session.client("sts", region_name=region)
params = {"DurationSeconds": duration}
response = sts_client.get_session_token(**params).get("Credentials")
cred = {
"Version": 1,
"AccessKeyId": response.get("AccessKeyId"),
"SecretAccessKey": response.get("SecretAccessKey"),
"SessionToken": response.get("SessionToken"),
"Expiration": response.get("Expiration").isoformat(),
}
except ClientError as ex:
sys.exit(255)
print(json.dumps(cred))
|
GeoCSBI/UTH_DB | mysite/uth_db/migrations/0012_auto_20170104_0938.py | Python | gpl-3.0 | 547 | 0.001828 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-04 09 | :38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: allow Table.booking to be NULL and
    null out the FK (instead of cascading) when the Booking is deleted."""

    dependencies = [
        ('uth_db', '0011_auto_20170102_1649'),
    ]

    operations = [
        migrations.AlterField(
            model_name='table',
            name='booking',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='uth_db.Booking'),
        ),
    ]
|
gongminmin/KlayGE | glloader/autogen.py | Python | gpl-2.0 | 18,019 | 0.027249 | #!/usr/bin/env python
#-*- coding: ascii -*-
from __future__ import print_function
try:
from StringIO import StringIO
except:
from io import StringIO
GPLNotice = """// glloader
// Copyright (C) 2004-2009 Minmin Gong
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published
// by the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
class Typedef:
    """A single C ``typedef`` line destined for the generated header."""

    def __init__(self, type_name, synonym):
        # Both halves of the typedef must be non-empty strings.
        assert len(type_name) > 0
        assert len(synonym) > 0
        self.type_name = type_name
        self.synonym = synonym

    def __str__(self):
        return "typedef {0} {1};".format(self.type_name, self.synonym)
class Token:
    """A ``#define`` constant exposed by an extension."""

    def __init__(self, name, value):
        # Name and value must both be non-empty strings.
        assert len(name) > 0
        assert len(value) > 0
        self.name = name
        self.value = value

    def __str__(self):
        return "#define {0} {1}".format(self.name, self.value)
class Param:
    """One formal parameter (type + name) of an extension function."""

    def __init__(self, type_name, name):
        # Both components are required to render a valid C parameter.
        assert len(type_name) > 0
        assert len(name) > 0
        self.type_name = type_name
        self.name = name

    def __str__(self):
        return "{0} {1}".format(self.type_name, self.name)
class Mapping:
    """Records that a function is aliased/promoted from another extension."""

    def __init__(self, from_ext, name):
        # Source extension and target name must be non-empty.
        assert len(from_ext) > 0
        assert len(name) > 0
        self.from_ext = from_ext
        self.name = name
class Function:
    """An API entry point declared by an extension spec."""

    def __init__(self, return_type, name, static_link, params, mappings):
        # Return type and function name are mandatory.
        assert len(return_type) > 0
        assert len(name) > 0
        self.return_type = return_type
        self.name = name
        self.static_link = static_link
        self.params = params
        self.mappings = mappings

    def params_str(self):
        """Render the C parameter list, or ``"void"`` for no parameters."""
        rendered = ', '.join(str(arg) for arg in self.params)
        return rendered if rendered else "void"

    def param_names_str(self):
        """Render the comma-separated argument names (empty string if none)."""
        return ', '.join(arg.name for arg in self.params)
class Extension:
    """Parsed, in-memory model of a single extension spec XML document.

    Reads the document element's ``name``/``predefined`` attributes plus its
    <typedefs>, <tokens>, <functions> and <additionals> children into
    Typedef/Token/Function(Param/Mapping) instances.

    Fixes in this revision: a stray ' | ' dump artifact inside the
    additionals loop broke the syntax; ``== None`` replaced by ``is None``.
    """

    def __init__(self, dom, quite_mode):
        self.name = dom.documentElement.getAttribute("name")
        # Optional preprocessor macro the generated code is guarded by.
        if dom.documentElement.hasAttribute("predefined"):
            self.predefined = dom.documentElement.getAttribute("predefined")
        else:
            self.predefined = None
        if not quite_mode:
            # A spec without a registry number is probably unofficial; warn.
            if dom.documentElement.getAttributeNode("reg_no") is None:
                print("\tWarning: %s is not in the OpenGL Extension Registry." % dom.documentElement.getAttribute("name"))

        self.typedefs = []
        typedefsTag = dom.documentElement.getElementsByTagName("typedefs")
        if (typedefsTag):
            for typedef in typedefsTag[0].getElementsByTagName("typedef"):
                self.typedefs.append(Typedef(typedef.getAttribute("type"),
                                        typedef.getAttribute("synonym")))

        self.tokens = []
        tokensTag = dom.documentElement.getElementsByTagName("tokens")
        if (tokensTag):
            for token in tokensTag[0].getElementsByTagName("token"):
                self.tokens.append(Token(token.getAttribute("name"),
                                    token.getAttribute("value")))

        self.functions = []
        funcionsTag = dom.documentElement.getElementsByTagName("functions")
        if (funcionsTag):
            for function in funcionsTag[0].getElementsByTagName("function"):
                params = []
                paramsTag = function.getElementsByTagName("params")
                if (paramsTag):
                    for param in paramsTag[0].getElementsByTagName("param"):
                        params.append(Param(param.getAttribute("type"),
                                param.getAttribute("name")))

                mappings = []
                mappingsTag = function.getElementsByTagName("mappings")
                if (mappingsTag):
                    for mapping in mappingsTag[0].getElementsByTagName("mapping"):
                        mappings.append(Mapping(mapping.getAttribute("from"),
                                mapping.getAttribute("name")))

                # link="static" marks functions resolved at link time rather
                # than through the loader.
                static_link = False
                link_attr = function.getAttribute("link")
                if link_attr is not None:
                    if "static" == str(link_attr):
                        static_link = True

                self.functions.append(Function(function.getAttribute("return"),
                            function.getAttribute("name"),
                            static_link,
                            params, mappings))

        self.additionals = []
        additionalsTag = dom.documentElement.getElementsByTagName("additionals")
        if (additionalsTag):
            # Direct <ext> children are unconditional extra requirements ...
            for ext_tag in additionalsTag[0].getElementsByTagName("ext"):
                if ext_tag.parentNode == additionalsTag[0]:
                    self.additionals.append([ext_tag.getAttribute("name")])
            # ... while a <one_of> group means "any one of these suffices".
            for one_of_tag in additionalsTag[0].getElementsByTagName("one_of"):
                one_of = []
                for ext in one_of_tag.getElementsByTagName("ext"):
                    one_of.append(ext.getAttribute("name"))
                self.additionals.append(one_of)
def create_header(prefix, extensions, base_dir, quite_mode):
header_str = StringIO()
header_str.wri | te("/*\n%s*/\n\n" % GPLNotice);
header_str.write("#ifndef _GLLOADER_%s_H\n" % prefix.upper())
header_str.write("#define _GLLOADER_%s_H\n\n" % prefix.upper())
header_str.write("#ifdef __cplusplus\n")
header_str.write("extern \"C\"\n")
header_str.write("{\n")
header_str.write("#endif\n\n")
typedef_set = set()
token_set = set()
function_set = set()
for extension in extensions:
header_str.write("#ifndef %s\n" % extension.name)
header_str.write("#define %s 1\n" % extension.name)
header_str.write("#endif\n\n")
for extension in extensions:
if extension.tokens:
header_str.write("#ifdef %s\n\n" % extension.name)
if extension.predefined != None:
header_str.write("#ifdef %s\n\n" % extension.predefined)
for token in extension.tokens:
if (token.name not in token_set):
header_str.write("%s\n" % token)
token_set.add(token.name)
header_str.write("\n")
if extension.predefined != None:
header_str.write("#endif\n\n")
header_str.write("#endif\n\n")
for extension in extensions:
if (extension.typedefs):
header_str.write("#ifdef %s\n\n" % extension.name)
if extension.predefined != None:
header_str.write("#ifdef %s\n\n" % extension.predefined)
for typedef in extension.typedefs:
if (typedef.synonym not in typedef_set):
header_str.write("%s\n" % typedef)
typedef_set.add(typedef.synonym)
header_str.write("\n")
if extension.predefined != None:
header_str.write("#endif\n\n")
header_str.write("#endif\n\n")
for extension in extensions:
if (extension.functions):
header_str.write("#ifdef %s\n\n" % extension.name)
if extension.predefined != None:
header_str.write("#ifdef %s\n\n" % extension.predefined)
for function in extension.functions:
if (function.name not in function_set):
header_str.write("typedef %s (GLLOADER_APIENTRY *%sFUNC)(%s);\n" % (function.return_type, function.name, function.params_str()))
header_str.write("\n")
for function in extension.functions:
if (function.name not in function_set):
header_str.write("extern GLLOADER_API %sFUNC %s;\n" % (function.name, function.name))
function_set.add(function.name)
header_str.write("\n")
if extension.predefined != None:
header_str.write("#endif\n\n")
header_str.write("#endif\n\n")
for extension in extensions:
header_str.write("typedef char (GLLOADER_APIENTRY *glloader_%sFUNC)(void);\n" % extension.name)
header_str.write("\n")
for extension in extensions:
header_str.write("extern GLLOADER_API glloader_%sFUNC glloader_%s;\n" % (extension.name, extension.name))
header_str.write("\n")
header_str.write("#ifdef __cplusplus\n")
header_str.write("}\n")
header_str.write("#endif\n\n")
header_str.write("#endif /* _GLLOADER_%s_H */\n" % prefix.upper())
try:
cur_header_file = open("%s/include/glloader/glloader_%s.h" |
Akka47/ymcmb | ymc/urls.py | Python | gpl-2.0 | 591 | 0.001692 | from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls import url, include
from . import views
# URL routes for the ymc app.  Two dump artifacts are repaired here: a
# stray ' | ' inside the r'^avatar/' pattern and one before the categoria
# entry.
urlpatterns = [
    url(r'^login/$', auth_views.login, name='login'),
    url(r'^logout/$', auth_views.logout, name='logout'),
    url(r'^admin/', admin.site.urls),
    url(r'^avatar/', include('avatar.urls')),
    url(r'^$', views.video_list, name='video_list'),
    url(r'^list/$', views.list, name='list'),
    url(r'^categoria/(?P<cat>[0-9]+)/$', views.categoria_list, name="ymc.views.video_categoria"),
]
|
dennisobrien/bokeh | bokeh/sampledata/commits.py | Python | bsd-3-clause | 2,129 | 0.009864 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    '''Load the bundled commits sample and convert its timestamps from GMT
    to US/Central, adding a ``time`` column with the time-of-day.'''
    frame = package_csv(
        'commits', 'commits.txt.gz', parse_dates=True, header=None,
        names=['day', 'datetime'], index_col='datetime',
    )
    frame = frame.tz_localize('GMT').tz_convert('US/Central')
    frame['time'] = frame.index.time
    return frame
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
data = _read_data()
|
weolar/miniblink49 | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver.py | Python | apache-2.0 | 4,526 | 0.001105 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# " | AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, O | R CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.port import driver
import time
import shutil
class BrowserTestDriver(driver.Driver):
    """Object for running print preview test(s) using browser_tests."""
    def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
        """Invokes the constructor of driver.Driver."""
        super(BrowserTestDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
    def start(self, pixel_tests, per_test_args, deadline):
        """Same as Driver.start() however, it has an extra step. It waits for
        a path to a file to be used for stdin to be printed by the browser test.
        If a path is found by the deadline test test will open the file and
        assign it to the stdin of the process that is owned by this driver's
        server process.
        """
        # FIXME(ivandavid): Need to handle case where the layout test doesn't
        # get a file name.
        new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
        # Only (re)start the server process when there is none yet or the
        # command line changed since the last start.
        if not self._server_process or new_cmd_line != self._current_cmd_line:
            self._start(pixel_tests, per_test_args)
            self._run_post_start_tasks()
        self._open_stdin_path(deadline)
    # Gets the path of the directory that the file for stdin communication is
    # in. Since the browser test cannot clean it up, the layout test framework
    # will. Everything the browser test uses is stored in the same directory as
    # the stdin file, so deleting that directory recursively will remove all the
    # other temp data, like the printed pdf. This function assumes the correct
    # file path is sent. It won't delete files with only one component to avoid
    # accidentally deleting files like /tmp.
    def _open_stdin_path(self, deadline, test=False):
        """Wait for the browser test to announce its stdin file and wire it
        up as the child process's stdin (skipped when *test* is True)."""
        # FIXME(ivandavid): Come up with a way to test & see what happens when
        # the file can't be opened.
        path, found = self._read_stdin_path(deadline)
        if found:
            if test == False:
                # Unbuffered binary handle so commands reach the test promptly.
                self._server_process._proc.stdin = open(path, 'wb', 0)
    def _read_stdin_path(self, deadline):
        """Return ``(stdin_path, True)`` if the browser test printed a path
        before *deadline*, otherwise ``(None, False)``."""
        # return (stdin_path, bool)
        block = self._read_block(deadline)
        if block.stdin_path:
            return (block.stdin_path, True)
        return (None, False)
    def cmd_line(self, pixel_tests, per_test_args):
        """Command line arguments to run the browser test."""
        cmd = self._command_wrapper(self._port.get_option('wrapper'))
        cmd.append(self._port._path_to_driver())
        # Run exactly the one manual browser test that drives layout tests.
        cmd.append('--gtest_filter=PrintPreviewPdfGeneratedBrowserTest.MANUAL_LayoutTestDriver')
        cmd.append('--run-manual')
        cmd.append('--single_process')
        cmd.extend(per_test_args)
        cmd.extend(self._port.get_option('additional_driver_flag', []))
        return cmd
    def stop(self):
        """Ask the browser test to quit cleanly, then do the normal shutdown."""
        if self._server_process:
            self._server_process.write('QUIT')
        super(BrowserTestDriver, self).stop(self._port.driver_stop_timeout())
|
alex/changes | changes/jobs/import_repo.py | Python | apache-2.0 | 1,505 | 0 | from __future__ import absolute_import, print_function
import logging
from datetime import datetime
from changes.config import db
from changes.models import Repository, RepositoryStatus
from changes.queue.task import tracked_task
logger = logging.getLogger('repo.sync')
@tracked_task(max_retries=None)
def import_repo(repo_id, parent=None):
    """Clone/update a repository's local checkout and import its commits.

    Walks the VCS log starting after *parent* (or from the beginning),
    saving each revision, then re-queues itself with the last imported
    commit id so the import proceeds incrementally.

    Fixes in this revision: two dump artifacts — ``if | not repo:`` and a
    ``' | '`` inside the "Repository ... has no VCS backend" log message.

    :param repo_id: primary key of the Repository to import.
    :param parent: commit identifier to resume the log walk from.
    """
    repo = Repository.query.get(repo_id)
    if not repo:
        logger.error('Repository %s not found', repo_id)
        return

    vcs = repo.get_vcs()
    if vcs is None:
        logger.warning('Repository %s has no VCS backend set', repo.id)
        return

    if repo.status == RepositoryStatus.inactive:
        logger.info('Repository %s is inactive', repo.id)
        return

    # Record the attempt up front so stalled imports remain visible.
    Repository.query.filter(
        Repository.id == repo.id,
    ).update({
        'last_update_attempt': datetime.utcnow(),
    }, synchronize_session=False)
    db.session.commit()

    if vcs.exists():
        vcs.update()
    else:
        vcs.clone()

    # Commit after every revision so progress survives a crash mid-walk.
    for commit in vcs.log(parent=parent):
        revision, created = commit.save(repo)
        db.session.commit()
        parent = commit.id

    Repository.query.filter(
        Repository.id == repo.id,
    ).update({
        'last_update': datetime.utcnow(),
        'status': RepositoryStatus.active,
    }, synchronize_session=False)
    db.session.commit()

    # Chain the next task from the last imported commit, if any.
    if parent:
        import_repo.delay(
            repo_id=repo.id.hex,
            task_id=repo.id.hex,
            parent=parent,
        )
|
Sunsoo/ecogwiki | lib/bzrlib/patiencediff.py | Python | gpl-3.0 | 5,971 | 0.001507 | #!/usr/bin/env python
# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# from bzrlib.lazy_import import lazy_import
# lazy_import(globals(), """
import os
import sys
import time
import difflib
# """)
__all__ = ['PatienceSequenceMatcher', 'unified_diff', 'unified_diff_files']
# This is a version of unified_diff which only adds a factory parameter
# so that you can override the default SequenceMatcher
# this has been submitted as a patch to python
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n',
                 sequencematcher=None):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.

    Unified diffs are a compact way of showing line changes and a few
    lines of context. The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with ---, +++, or @@) are
    created with a trailing newline. This is helpful so that inputs
    created from file.readlines() result in diffs that are suitable for
    file.writelines() since both the inputs and outputs have trailing
    newlines.

    For inputs that do not have trailing newlines, set the lineterm
    argument to "" so that the output will be uniformly newline free.

    The unidiff format normally has a header for filenames and modification
    times. Any or all of these may be specified using strings for
    'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification
    times are normally expressed in the format returned by time.ctime().

    This variant of difflib.unified_diff adds the 'sequencematcher'
    parameter so that an alternative SequenceMatcher implementation (e.g.
    PatienceSequenceMatcher) can be plugged in.

    Example:

    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:20:52 2003',
    ...             lineterm=''):
    ...     print line
    --- Original Sat Jan 26 23:30:50 1991
    +++ Current Fri Jun 06 10:20:52 2003
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """
    # NOTE: this revision repairs two dump artifacts that broke the syntax:
    # 'sequencematcher | (None,a,b)' and 'group[-1][4 | ]'.
    if sequencematcher is None:
        import difflib
        sequencematcher = difflib.SequenceMatcher

    started = False
    for group in sequencematcher(None, a, b).get_grouped_opcodes(n):
        if not started:
            # Emit the two-line file header before the first hunk only.
            yield '--- %s %s%s' % (fromfile, fromfiledate, lineterm)
            yield '+++ %s %s%s' % (tofile, tofiledate, lineterm)
            started = True
        # Hunk header: 1-based start line and length for each side.
        i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
        yield "@@ -%d,%d +%d,%d @@%s" % (i1+1, i2-i1, j1+1, j2-j1, lineterm)
        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                for line in a[i1:i2]:
                    yield ' ' + line
                continue
            if tag == 'replace' or tag == 'delete':
                for line in a[i1:i2]:
                    yield '-' + line
            if tag == 'replace' or tag == 'insert':
                for line in b[j1:j2]:
                    yield '+' + line
def unified_diff_files(a, b, sequencematcher=None):
    """Generate the unified diff between two files.

    :param a: path of the first file, or '-' to read it from stdin.
    :param b: path of the second file, or '-' to read it from stdin.
    :param sequencematcher: optional SequenceMatcher-compatible factory.
    :return: iterable of diff lines (an empty list when the paths match).
    """
    # Should this actually be an error?
    if a == b:
        return []

    if a == '-':
        file_a = sys.stdin
        time_a = time.time()
    else:
        file_a = open(a, 'rb')
        time_a = os.stat(a).st_mtime

    if b == '-':
        file_b = sys.stdin
        time_b = time.time()
    else:
        file_b = open(b, 'rb')
        time_b = os.stat(b).st_mtime

    # Read both files up front and close the handles promptly; the previous
    # implementation leaked both file objects.
    lines_a = file_a.readlines()
    if file_a is not sys.stdin:
        file_a.close()
    lines_b = file_b.readlines()
    if file_b is not sys.stdin:
        file_b.close()

    # TODO: Include fromfiledate and tofiledate
    return unified_diff(lines_a, lines_b,
                        fromfile=a, tofile=b,
                        sequencematcher=sequencematcher)
# try:
# from bzrlib._patiencediff_c import (
# unique_lcs_c as unique_lcs,
# recurse_matches_c as recurse_matches,
# PatienceSequenceMatcher_c as PatienceSequenceMatcher
# )
# except ImportError:
# from bzrlib._patiencediff_py import (
# unique_lcs_py as unique_lcs,
# recurse_matches_py as recurse_matches,
# PatienceSequenceMatcher_py as PatienceSequenceMatcher
# )
from bzrlib._patiencediff_py import (
unique_lcs_py as unique_lcs,
recurse_matches_py as recurse_matches,
PatienceSequenceMatcher_py as PatienceSequenceMatcher
)
def main(args):
    """CLI entry point: print the unified diff of the two files in *args*.

    Returns -1 on usage error, otherwise None.  (Python 2 module: note the
    ``print`` statement below.)
    """
    import optparse
    p = optparse.OptionParser(usage='%prog [options] file_a file_b'
                              '\nFiles can be "-" to read from stdin')
    p.add_option('--patience', dest='matcher', action='store_const', const='patience',
                 default='patience', help='Use the patience difference algorithm')
    p.add_option('--difflib', dest='matcher', action='store_const', const='difflib',
                 default='patience', help='Use python\'s difflib algorithm')
    # Map the --patience/--difflib flags onto the matcher implementation.
    algorithms = {'patience':PatienceSequenceMatcher, 'difflib':difflib.SequenceMatcher}
    (opts, args) = p.parse_args(args)
    matcher = algorithms[opts.matcher]
    if len(args) != 2:
        print 'You must supply 2 filenames to diff'
        return -1
    # Diff lines already carry their terminators, so write them verbatim.
    for line in unified_diff_files(args[0], args[1], sequencematcher=matcher):
        sys.stdout.write(line)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
alexurquhart/webgis | app/pubsub.py | Python | mit | 882 | 0.006803 | from redis_conn import Redis as R
from sys import stdout
import json, time
class PubSub:
def __init__(self, channel="tweets"):
self.__P = R.pubsub(ignore_subscribe_messages=True)
self.__channel = channel
# Publish tweet to subscribers
def publish(self, tweet):
ser = tweet.serialized
ser["online_count"] = R.execute_command("PUBSUB", "NUMSUB", s | elf.__channel)
data = json.dumps(ser, ensure_ascii=False)
R.publish(self.__channel, data) |
# Listen for incoming data and print to stdout
# Pause is the time between attempts to retreive messages
def listen(self, pause=0.1):
self.__P.subscribe(self.__channel)
while True:
message = self.__P.get_message()
if message:
print message["data"]
stdout.flush()
time.sleep(pause) |
rchakra3/x9115rc3 | hw/code/6/model/osyczka2.py | Python | gpl-2.0 | 2,597 | 0 | from model import Model
from helpers.candidate import Candidate
from helpers.decision import Decision
class Osyczka2(Model):
def __init__(self):
Model.__init__(self)
self.initialize_decs()
def initialize_decs(self):
dec = Decision('x1', 0, 10)
self.decs.append(dec)
dec = Decision('x2', 0, 10)
self.decs.append(dec)
dec = Decision('x3', 1, 5)
self.decs.append(dec)
dec = Decision('x4', 0, 6)
self.decs.append(dec)
dec = Decision('x5', 1, 5)
self.decs.append(dec)
dec = Decision('x6', 0, 10)
self.decs.append(dec)
def f1(self, candidate):
vec = candidate.dec_vals
part1 = 25 * ((vec[0] - 2) ** 2)
part2 = (vec[1] - 2) ** 2
part3 = (((vec[2] - 1) ** 2) * ((vec[3] - 4) ** 2))
part4 = (vec[4] - 1) ** 2
return (-(part1 + part2 + part3 + part4))
def f2(self, candidate):
vec = candidate.dec_vals
val = 0
for x in vec:
val += x ** 2
return val
def objectives(self):
return [self.f1, self.f2]
def aggregate(self, candidate):
aggr = 0
self.eval(candidate)
for score in candidate.scores:
aggr += score
return aggr
def gen_candidate(self):
for i in range(0, self.patience):
decs = [dec.generate_valid_val() for dec in self.decs]
can = Candidate(dec_vals=list(decs))
if self.ok(can):
return can
def ok(self, candidate, debug=False):
if len(candidate.dec_vals) != 6:
return False
x1 = candidate.dec_vals[0]
x2 = candidate.dec_vals[1]
x3 = candidate.dec_vals[2]
x4 = candidate.dec_vals[3]
x5 = candidate.dec_vals[4]
x6 = candidate.dec_vals[5]
if not ((x1 + x2) >= 2):
if | debug:
print "Failed 1"
return False
if not ((x1 + x2) <= 6):
if debug:
print "Failed 2"
return False
if not ((x2 - x1) <= 2):
if debug:
print "Failed 3"
return False
if not ((x1 - (3 * x2)) <= 2):
if debug:
print "Failed 4"
return False
if not ((((x3 - 3) ** 2) + x4) <= 4):
if debug:
| print "Failed 5"
return False
if not ((((x5 - 3) ** 3) + x6) >= 4):
if debug:
print "Failed 6"
return False
return True
|
scikit-nano/scikit-nano | sknano/core/crystallography/_xtal_lattices.py | Python | bsd-2-clause | 41,085 | 0.000097 | # -*- coding: utf-8 -*-
"""
=============================================================================
Crystal lattice classes (:mod:`sknano.core.crystallography._xtal_lattices`)
=============================================================================
.. currentmodule:: sknano.core.crystallography._xtal_lattices
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
__docformat__ = 'restructuredtext en'
# from abc import ABCMeta, abstractproperty
from functools import total_ordering
import numpy as np
from sknano.core import BaseClass
from sknano.core.math import Vector, Point, zhat, rotation_matrix
__all__ = ['LatticeBase', 'ReciprocalLatticeBase',
'DirectLatticeMixin', 'ReciprocalLatticeMixin',
'Direct2DLatticeMixin', 'Direct3DLatticeMixin',
'Reciprocal2DLatticeMixin', 'Reciprocal3DLatticeMixin',
'Crystal2DLattice', 'Reciprocal2DLattice',
'CrystalLattice', 'ReciprocalLattice',
'Crystal3DLattice', 'Reciprocal3DLattice']
# __all__ += ['BravaisLattice', 'Bravais3DLattice',
# 'SimpleCubicLattice', 'BodyCenteredCubicLattice',
# 'FaceCenteredCubicLattice']
@total_ordering
class LatticeBase(BaseClass):
    """Base class for crystallographic lattice objects.
    Parameters
    ----------
    nd : int
        Dimensionality of the lattice (2 or 3).
    cell_matrix : array_like
        Matrix of lattice row vectors.
    orientation_matrix : array_like, optional
        3x3 rotation applied on top of the orthogonalization matrix.
    offset : array_like, optional
        Cartesian origin of the lattice.
    """
    def __init__(self, nd=None, cell_matrix=None, orientation_matrix=None,
                 offset=None):
        super().__init__()
        self.nd = nd
        # Origin is always stored as a 3D point, even for 2D lattices.
        self.offset = Point(offset, nd=3)
        # If only the cell matrix is given, recover the orientation part by
        # factoring out the orthogonalization (ortho) matrix.
        if cell_matrix is not None and orientation_matrix is None:
            orientation_matrix = cell_matrix.T * self.fractional_matrix
        if orientation_matrix is None:
            orientation_matrix = np.asmatrix(np.identity(3))
        self.orientation_matrix = np.asmatrix(orientation_matrix)
        self.lattice_type = None
    def __dir__(self):
        # Restricted attribute list; __eq__ compares exactly these.
        return ['nd', 'offset', 'orientation_matrix']
    def __eq__(self, other):
        # NOTE(review): implicitly returns None (falsy) for non-lattice
        # operands instead of NotImplemented — confirm intended.
        if isinstance(other, type(self)):
            return self is other or \
                all([np.allclose(getattr(self, attr), getattr(other, attr))
                     for attr in dir(self)])
    def __lt__(self, other):
        # Order 3D lattices by cell volume; 2D lattices fall back to area.
        if isinstance(other, type(self)):
            try:
                return self.cell_volume < other.cell_volume
            except AttributeError:
                return self.cell_area < other.cell_area
    @property
    def cell_matrix(self):
        """Matrix of lattice row vectors.
        Same as :attr:`Crystal2DLattice.ortho_matrix`\ .T or
        :attr:`Crystal3DLattice.ortho_matrix`\ .T.
        """
        return (self.orientation_matrix * self.ortho_matrix).T
    @property
    def matrix(self):
        """Alias for \
        :attr:`~sknano.core.crystallography.LatticeBase.cell_matrix`."""
        return self.cell_matrix
    @property
    def fractional_matrix(self):
        """Transformation matrix to convert from cartesian coordinates to \
        fractional coordinates."""
        # ortho_matrix is supplied by subclasses (see Crystal2D/3DLattice).
        return np.linalg.inv(self.ortho_matrix)
    @property
    def metric_tensor(self):
        """Metric tensor."""
        return self.cell_matrix * self.cell_matrix.T
    def fractional_to_cartesian(self, fcoords):
        """Convert fractional coordinate to cartesian coordinate.
        Parameters
        ----------
        fcoords : array_like
            A single 3-vector or an (N, 3) array of fractional coordinates.
        Returns
        -------
        :class:`~numpy:numpy.ndarray`
        """
        ccoords = self.orientation_matrix * self.ortho_matrix * \
            np.asmatrix(fcoords).T + self.offset.column_matrix
        # Reshape for the single-point case; fall back to (N, 3) otherwise.
        try:
            return ccoords.T.A.reshape((3, ))
        except ValueError:
            return ccoords.T.A.reshape((len(fcoords), 3))
    def cartesian_to_fractional(self, ccoords):
        """Convert cartesian coordinate to fractional coordinate.
        Parameters
        ----------
        ccoords : array_like
            A single 3-vector or an (N, 3) array of cartesian coordinates.
        Returns
        -------
        :class:`~numpy:numpy.ndarray`
        """
        # Inverse of fractional_to_cartesian: undo offset, orientation, ortho.
        fcoords = np.linalg.inv(self.ortho_matrix) * \
            np.linalg.inv(self.orientation_matrix) * \
            (np.asmatrix(ccoords).T - self.offset.column_matrix)
        try:
            return fcoords.T.A.reshape((3, ))
        except ValueError:
            return fcoords.T.A.reshape((len(ccoords), 3))
    def wrap_fractional_coordinate(self, p, epsilon=1e-6, pbc=None):
        """Wrap fractional coordinate to lie within unit cell.
        Parameters
        ----------
        p : array_like
        epsilon : float, optional
            Coordinates within epsilon of a cell boundary snap to 0.
        pbc : array_like of bool, optional
            Per-axis periodic flags; non-periodic axes are left unwrapped.
        Returns
        -------
        :class:`~numpy:numpy.ndarray`
        """
        if pbc is None:
            pbc = np.asarray(np.ones(3), dtype=bool)
        # Masked array: only periodic axes participate in the wrapping.
        p = np.ma.array(p, mask=~pbc)
        p = np.ma.fmod(p, 1)
        p[np.ma.where(p < 0)] += 1
        p[np.ma.where(p > 1 - epsilon)] -= 1
        p[np.ma.where(np.logical_or((p > 1 - epsilon), (p < epsilon)))] = 0
        p.mask = np.ma.nomask
        return p.tolist()
    def wrap_cartesian_coordinate(self, p, pbc=None):
        """Wrap cartesian coordinate to lie within unit cell.
        Parameters
        ----------
        p : array_like
        pbc : array_like of bool, optional
        Returns
        -------
        :class:`~numpy:numpy.ndarray`
        """
        # Round-trip through fractional space, wrapping in between.
        return self.fractional_to_cartesian(
            self.wrap_fractional_coordinate(self.cartesian_to_fractional(p),
                                            pbc=pbc))
    def rotate(self, angle=None, axis=None, anchor_point=None,
               rot_point=None, from_vector=None, to_vector=None, degrees=False,
               transform_matrix=None, verbose=False, **kwargs):
        """Rotate unit cell.
        Parameters
        ----------
        angle : float
        axis : :class:`~sknano.core.math.Vector`, optional
        anchor_point : :class:`~sknano.core.math.Point`, optional
        rot_point : :class:`~sknano.core.math.Point`, optional
        from_vector, to_vector : :class:`~sknano.core.math.Vector`, optional
        degrees : bool, optional
        transform_matrix : :class:`~numpy:numpy.ndarray`
            Precomputed rotation matrix; when given, all the angle/axis
            parameters are ignored.
        See Also
        --------
        core.math.rotate
        """
        # 2D lattices can only rotate in-plane, i.e. about the z axis.
        if self.nd == 2:
            axis = 'z'
        if transform_matrix is None:
            transform_matrix = \
                np.asmatrix(
                    rotation_matrix(angle=angle, axis=axis,
                                    anchor_point=anchor_point,
                                    rot_point=rot_point,
                                    from_vector=from_vector,
                                    to_vector=to_vector, degrees=degrees,
                                    verbose=verbose, **kwargs))
        # print('transform_matrix: {}'.format(transform_matrix))
        # transform_matrix = \
        #     transformation_matrix(angle=angle, axis=axis,
        #                           anchor_point=anchor_point,
        #                           rot_point=rot_point,
        #                           from_vector=from_vector,
        #                           to_vector=to_vector, degrees=degrees,
        #                           verbose=verbose, **kwargs)
        # Compose the new rotation with any existing orientation.
        self.orientation_matrix = \
            transform_matrix * self.orientation_matrix
    def translate(self, t):
        """Translate lattice.
        Parameters
        ----------
        t : :class:`Vector`
        See Also
        --------
        core.math.translate
        """
        self.offset.translate(t)
class ReciprocalLatticeBase(LatticeBase):
"""Base class for crystallographic reciprocal lattice objects.
Parameters
----------
direct_lattice : :class:`Crystal2DLattice` or :class:`Crystal3DLattice`
nd : int
"""
def __init__ | (self, direct_lattice, nd, offset=None):
self._direct_lattice = direct_lattice
super().__init__(
| nd=nd, cell_matrix=self._direct_lattice.cell_matrix,
orientation_matrix=self._direct_lattice.orientation_matrix,
offset=offset)
def __getattr__ |
myint/vulture | tests/test_scavenging.py | Python | gpl-3.0 | 6,238 | 0 | from wake import Vulture
def test_function_object1():
    # Assigning a function to a name counts as a use of that function.
    code = """\
def func():
    pass
a = func
"""
    checker = Vulture()
    checker.scan(code)
    assert checker.defined_funcs == ['func']
    assert checker.unused_funcs == []
def test_function_object2():
    # A bare reference to a function name counts as a use.
    code = """\
def func():
    pass
func
"""
    checker = Vulture()
    checker.scan(code)
    assert checker.defined_funcs == ['func']
    assert checker.used_funcs == ['func']
    assert checker.unused_funcs == []
def test_function1():
    code = """\
def func1(a):
    pass
def func2(b):
    func1(b)
"""
    checker = Vulture()
    checker.scan(code)
    # Maybe someday we will support conditional execution and detect func1 too?
    assert checker.unused_funcs == ['func2']
    assert checker.defined_funcs == ['func1', 'func2']
def test_function2():
    # A direct call marks the function as used.
    code = """\
def func(a):
    pass
func(5)
"""
    checker = Vulture()
    checker.scan(code)
    assert checker.unused_funcs == []
    assert checker.defined_funcs == ['func']
def test_function3():
    # Calling a function inside an assignment still counts as a use.
    code = """\
def foo(a):
    pass
b = foo(5)
"""
    checker = Vulture()
    checker.scan(code)
    assert checker.unused_funcs == []
    assert checker.defined_funcs == ['foo']
def test_function_and_method1():
    code = """\
class Bar(object):
    def func(self):
        pass
def func():
    pass
func()
"""
    checker = Vulture(verbose=True)
    checker.scan(code)
    # TODO: Maybe we can differentiate between method and function use.
    assert checker.unused_funcs == ['Bar']
    assert checker.defined_funcs == ['Bar', 'func', 'func']
def test_attribute1():
    # An attribute that is only ever assigned is reported as unused.
    code = """\
foo.bar = 1
foo.bar = 2
"""
    checker = Vulture(verbose=True)
    checker.scan(code)
    assert checker.unused_funcs == []
    assert checker.defined_funcs == []
    assert checker.defined_attrs == ['bar', 'bar']
    assert checker.used_attrs == []
    assert checker.unused_attrs == ['bar']
def test_callback1():
    # Accessing a method as an attribute (e.g. a callback) marks it used.
    code = """\
class Bar(object):
    def foo(self):
        pass
b = Bar()
b.foo
"""
    checker = Vulture()
    checker.scan(code)
    assert checker.used_attrs == ['foo']
    assert checker.unused_funcs == []
    assert checker.defined_funcs == ['Bar', 'foo']
def test_class1():
    # A class defined but never referenced is unused.
    code = """\
class Bar(object):
    pass
"""
    checker = Vulture()
    checker.scan(code)
    assert checker.used_attrs == []
    assert checker.unused_funcs == ['Bar']
    assert checker.defined_funcs == ['Bar']
    assert checker.used_funcs == []
def test_class2():
    # Subclassing counts as a use of the base class.
    code = """\
class Bar():
    pass
class Foo(Bar):
    pass
Foo()
"""
    checker = Vulture()
    checker.scan(code)
    assert checker.used_attrs == []
    assert checker.unused_funcs == []
    assert checker.defined_funcs == ['Bar', 'Foo']
    assert checker.used_funcs == ['Bar', 'Foo']
def test_class3():
v = Vulture()
v.scan("""\
class Bar():
pass
[Bar]
""")
assert v.used_attrs == []
assert v.defined_funcs == ['Bar']
assert v.used_funcs == ['Bar']
assert v.unused_funcs == []
def test_class4():
v = Vulture()
v.scan("""\
class Bar():
pass
Bar()
""")
assert v.used_attrs == []
assert v.defined_funcs == ['Bar']
assert v.used_funcs == ['Bar']
assert v.unused_funcs == []
def test_class5():
v = Vulture()
v.scan("""\
class Bar():
pass
b = Bar()
""")
assert v.used_attrs == []
assert v.defined_funcs == ['Bar']
assert v.unused_funcs == []
def test_class6():
v = Vulture()
v.scan("""\
class Bar():
pass
a = []
a.insert(0, Bar())
""")
assert v.defined_funcs == ['Bar']
assert v.unused_funcs == []
def test_class7():
v = Vulture()
v.scan("""\
class Bar(object):
pass
class Foo(object):
def __init__(self):
self.b = xyz.Bar(self)
""")
assert v.defined_funcs == ['Bar', 'Foo']
assert v.unused_funcs == ['Foo']
def test_method1():
v = Vulture()
v.scan("""\
def __init__(self):
self.a.foo()
class Bar(object):
def foo(self):
pass
""")
assert v.defined_funcs == ['Bar', 'foo']
assert v.unused_funcs == ['Bar']
# Variable definition/use tracking tests, including tuple unpacking and
# comprehension targets.
def test_variable1():
    v = Vulture(verbose=True)
    v.scan('a = 1\nb = a')
    assert v.defined_funcs == []
    assert v.used_vars == ['a']
    assert v.defined_vars == ['a', 'b']
    assert v.unused_vars == ['b']


def test_variable2():
    # b.a reads attribute a from b: b is a used variable, a a used attribute.
    v = Vulture(verbose=True)
    v.scan('a = 1\nc = b.a')
    assert v.defined_funcs == []
    assert v.defined_vars == ['a', 'c']
    assert v.used_vars == ['b']
    assert v.unused_vars == ['c']


def test_variable3():
    # %(a)s together with locals() is treated as a use of a.
    v = Vulture(verbose=True)
    v.scan("a = 1\n'%(a)s' % locals()")
    assert v.defined_funcs == []
    assert v.defined_vars == ['a']
    assert v.used_vars == ['a', 'locals']
    assert v.unused_vars == []


def test_variable4():
    # Names bound by (nested) tuple assignment are never reported unused.
    v = Vulture(verbose=True)
    v.scan('(a, b), c = (d, e, f)')
    assert v.defined_funcs == []
    assert v.defined_vars == ['a', 'b', 'c']
    assert v.used_vars == ['d', 'e', 'f']
    assert sorted(v.tuple_assign_vars) == ['a', 'b', 'c']
    assert v.unused_vars == []


def test_variable5():
    # for-loop tuple targets count as tuple assignments, too.
    v = Vulture(verbose=True)
    v.scan('for a, b in func(): a')
    assert v.defined_funcs == []
    assert v.defined_vars == ['a', 'b']
    assert sorted(v.used_vars) == ['a', 'func']
    assert v.tuple_assign_vars == ['a', 'b']
    assert v.unused_vars == []


def test_variable6():
    # Same for comprehension targets.
    v = Vulture(verbose=True)
    v.scan('[a for a, b in func()]')
    assert v.defined_vars == ['a', 'b']
    assert sorted(v.used_vars) == ['a', 'func']
    assert v.tuple_assign_vars == ['a', 'b']
    assert v.unused_vars == []


def test_unused_var1():
    # Underscore-prefixed and dunder names are deliberately ignored.
    v = Vulture(verbose=True)
    v.scan('_a = 1\n__b = 2\n__c__ = 3')
    assert v.defined_vars == []
    assert sorted(v.used_vars) == []
    assert v.unused_vars == []
# Property and instance-attribute tracking tests.
def test_prop1():
    # A @property read through an instance marks the property as used.
    v = Vulture(verbose=True)
    v.scan("""\
class Bar(object):
    @property
    def prop(self):
        pass
c = Bar()
c.prop
""")
    assert v.defined_funcs == ['Bar']
    assert v.defined_props == ['prop']
    assert v.unused_props == []


def test_prop2():
    # A module-level variable with the same name does not count as a use
    # of the property.
    v = Vulture(verbose=True)
    v.scan("""\
class Bar(object):
    @property
    def prop(self):
        pass
prop = 1
""")
    assert v.defined_funcs == ['Bar']
    assert v.defined_props == ['prop']
    assert v.unused_props == ['prop']
    assert v.defined_vars == ['prop']


def test_object_attribute():
    # self.a = [] defines an attribute (not a variable); unread, it is
    # reported unused.
    v = Vulture(verbose=True)
    v.scan("""\
class Bar(object):
    def __init__(self):
        self.a = []
""")
    assert v.defined_funcs == ['Bar']
    assert v.defined_vars == []
    assert v.defined_attrs == ['a']
    assert v.used_attrs == []
    assert v.unused_attrs == ['a']
|
arvenil/resume | node_modules/node-gyp/gyp/pylib/gyp/generator/compile_commands_json.py | Python | mit | 4,591 | 0.000871 | # Copyright (c) 201 | 6 Ben Noordhuis <info@bnoordhuis.nl>. All rights reserved.
# Use of this source code is governed b | y a BSD-style license that can be
# found in the LICENSE file.
import gyp.common
import gyp.xcode_emulation
import json
import os
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = True
generator_wants_sorted_dependencies = False
# Lifted from make.py. The actual values don't matter much.
generator_default_variables = {
"CONFIGURATION_NAME": "$(BUILDTYPE)",
"EXECUTABLE_PREFIX": "",
"EXECUTABLE_SUFFIX": "",
"INTERMEDIATE_DIR": "$(obj).$(TOOLSET)/$(TARGET)/geni",
"PRODUCT_DIR": "$(builddir)",
"RULE_INPUT_DIRNAME": "%(INPUT_DIRNAME)s",
"RULE_INPUT_EXT": "$(suffix $<)",
"RULE_INPUT_NAME": "$(notdir $<)",
"RULE_INPUT_PATH": "$(abspath $<)",
"RULE_INPUT_ROOT": "%(INPUT_ROOT)s",
"SHARED_INTERMEDIATE_DIR": "$(obj)/gen",
"SHARED_LIB_PREFIX": "lib",
"STATIC_LIB_PREFIX": "lib",
"STATIC_LIB_SUFFIX": ".a",
}
def IsMac(params):
    """Return True when generating for the 'mac' gyp flavor."""
    return "mac" == gyp.common.GetFlavor(params)
def CalculateVariables(default_variables, params):
    """Expose the target flavor to .gyp files as the OS variable (if unset)."""
    default_variables.setdefault("OS", gyp.common.GetFlavor(params))
def AddCommandsForTarget(cwd, target, params, per_config_commands):
    """Append one compile command per C/C++ source of `target`.

    Args:
        cwd: directory of the target's .gyp file; relative source paths are
            resolved against it.
        target: the target dict (configurations, sources, ...).
        params: generator params (used for flavor detection and output_dir).
        per_config_commands: dict mapping configuration name to a list of
            compile-command entries; mutated in place.
    """
    output_dir = params["generator_flags"].get("output_dir", "out")
    for configuration_name, configuration in target["configurations"].items():
        if IsMac(params):
            # On mac the effective cflags come from Xcode settings emulation.
            xcode_settings = gyp.xcode_emulation.XcodeSettings(target)
            cflags = xcode_settings.GetCflags(configuration_name)
            cflags_c = xcode_settings.GetCflagsC(configuration_name)
            cflags_cc = xcode_settings.GetCflagsCC(configuration_name)
        else:
            cflags = configuration.get("cflags", [])
            cflags_c = configuration.get("cflags_c", [])
            cflags_cc = configuration.get("cflags_cc", [])
        # Common flags apply to both languages.
        cflags_c = cflags + cflags_c
        cflags_cc = cflags + cflags_cc

        defines = configuration.get("defines", [])
        defines = ["-D" + s for s in defines]

        # TODO(bnoordhuis) Handle generated source files.
        extensions = (".c", ".cc", ".cpp", ".cxx")
        sources = [s for s in target.get("sources", []) if s.endswith(extensions)]

        def resolve(filename):
            # Source paths are relative to the .gyp file's directory.
            return os.path.abspath(os.path.join(cwd, filename))

        # TODO(bnoordhuis) Handle generated header files.
        include_dirs = configuration.get("include_dirs", [])
        include_dirs = [s for s in include_dirs if not s.startswith("$(obj)")]
        includes = ["-I" + resolve(s) for s in include_dirs]

        defines = gyp.common.EncodePOSIXShellList(defines)
        includes = gyp.common.EncodePOSIXShellList(includes)
        cflags_c = gyp.common.EncodePOSIXShellList(cflags_c)
        cflags_cc = gyp.common.EncodePOSIXShellList(cflags_cc)

        commands = per_config_commands.setdefault(configuration_name, [])
        for source in sources:
            file = resolve(source)
            isc = source.endswith(".c")
            cc = "cc" if isc else "c++"
            cflags = cflags_c if isc else cflags_cc
            command = " ".join(
                (
                    cc,
                    defines,
                    includes,
                    cflags,
                    "-c",
                    gyp.common.EncodePOSIXShellArgument(file),
                )
            )
            commands.append(dict(command=command, directory=output_dir, file=file))
def GenerateOutput(target_list, target_dicts, data, params):
    """Collect compile commands for every target and write one
    compile_commands.json per configuration under the output directory."""
    per_config_commands = {}
    for qualified_target, target in target_dicts.items():
        build_file, target_name, toolset = gyp.common.ParseQualifiedTarget(
            qualified_target
        )
        if IsMac(params):
            settings = data[build_file]
            gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(settings, target)
        cwd = os.path.dirname(build_file)
        AddCommandsForTarget(cwd, target, params, per_config_commands)

    output_dir = params["generator_flags"].get("output_dir", "out")
    for configuration_name, commands in per_config_commands.items():
        filename = os.path.join(output_dir, configuration_name, "compile_commands.json")
        gyp.common.EnsureDirExists(filename)
        # Use a context manager so the handle is flushed and closed even on
        # error; the original opened the file and never closed it.
        with open(filename, "w") as fp:
            json.dump(commands, fp=fp, indent=0, check_circular=False)
def PerformBuild(data, configurations, params):
    """No-op: this generator only emits compile_commands.json; it does not
    know how to drive a build."""
    pass
|
gprMax/gprMax | gprMax/pml_updates/pml_updates_electric_HORIPML_gpu.py | Python | gpl-3.0 | 47,831 | 0.003596 | # Copyright (C) 2015-2022: The University of Edinburgh
# Authors: Craig Warren and Antonis Giannopoulos
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU GenRAl Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU GenRAl Public License for more details.
#
# You should have received a copy of the GNU GenRAl Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
from string import Template
kernels_template_pml_electric_HORIPML = Template("""
// Macros for converting subscripts to linear index:
#define INDEX2D_R(m, n) (m)*(NY_R)+(n)
#define INDEX2D_MAT(m, n) (m)*($NY_MATCOEFFS)+(n)
#define INDEX3D_FIELDS(i, j, k) (i)*($NY_FIELDS)*($NZ_FIELDS)+(j)*($NZ_FIELDS)+(k)
#define INDEX4D_ID(p, i, j, k) (p)*($NX_ID)*($NY_ID)*($NZ_ID)+(i)*($NY_ID)*($NZ_ID)+(j)*($NZ_ID)+(k)
#define INDEX4D_PHI1(p, i, j, k) (p)*(NX_PHI1)*(NY_PHI1)*(NZ_PHI1)+(i)*(NY_PHI1)*(NZ_PHI1)+(j)*(NZ_PHI1)+(k)
#define INDEX4D_PHI2(p, i, j, k) (p)*(NX_PHI2)*(NY_PHI2)*(NZ_PHI2)+(i)*(NY_PHI2)*(NZ_PHI2)+(j)*(NZ_PHI2)+(k)
// Material coefficients (read-only) in constant memory (64KB)
__device__ __constant__ $REAL updatecoeffsE[$N_updatecoeffsE];
__global__ void order1_xminus(int xs, int xf, int ys, int yf, int zs, int zf, int NX_PHI1, int NY_PHI1, int NZ_PHI1, int NX_PHI2, int NY_PHI2, int NZ_PHI2, int NY_R, const unsigned int* __restrict__ ID, const $REAL* __restrict__ Ex, $REAL *Ey, $REAL *Ez, const $REAL* __restrict__ Hx, const $REAL* __restrict__ Hy, const $REAL* __restrict__ Hz, $REAL *PHI1, $REAL *PHI2, const $REAL* __restrict__ RA, const $REAL* __restrict__ RB, const $REAL* __restrict__ RE, const $REAL* __restrict__ RF, $REAL d) {
// This function updates the Ey and Ez field components for the xminus slab.
//
// Args:
// xs, xf, ys, yf, zs, zf: Cell coordinates of PML slab
// NX_PHI, NY_PHI, NZ_PHI, NY_R: Dimensions of PHI1, PHI2, and R PML arrays
// ID, E, H: Access to ID and field component arrays
// Phi, RA, RB, RE, RF: Access to PML electric coefficient arrays
// d: Spatial discretisation, e.g. dx, dy or dz
// Obtain the linear index corresponding to the current thread
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Convert the linear index to subscripts for PML PHI1 (4D) arrays
int p1 = idx / (NX_PHI1 * NY_PHI1 * NZ_PHI1);
int i1 = (idx % (NX_PHI1 * NY_PHI1 * NZ_PHI1)) / (NY_PHI1 * NZ_PHI1);
int j1 = ((idx % (NX_PHI1 * NY_PHI1 * NZ_PHI1)) % (NY_PHI1 * NZ_PHI1)) / NZ_PHI1;
int k1 = ((idx % (NX_PHI1 * NY_PHI1 * NZ_PHI1)) % (NY_PHI1 * NZ_PHI1)) % NZ_PHI1;
// Convert the linear index to subscripts for PML PHI2 (4D) arrays
int p2 = idx / (NX_PHI2 * NY_PHI2 * NZ_PHI2);
int i2 = (idx % (NX_PHI2 * NY_PHI2 * NZ_PHI2)) / (NY_PHI2 * NZ_PHI2);
int j2 = ((idx % (NX_PHI2 * NY_PHI2 * NZ_PHI2)) % (NY_PHI2 * NZ_PHI2)) / NZ_PHI2;
int k2 = ((idx % (NX_PHI2 * NY_PHI2 * NZ_PHI2)) % (NY_PHI2 * NZ_PHI2)) % NZ_PHI2;
$REAL RA01, RB0, RE0, RF0, dHy, dHz;
$REAL dx = d;
int ii, jj, kk, materialEy, materialEz;
int nx = xf - xs;
int ny = yf - ys;
int nz = zf - zs;
if (p1 == 0 && i1 < nx && j1 < ny && k1 < nz) {
// Subscripts for field arrays
ii = xf - i1;
jj = j1 + ys;
kk = k1 + zs;
// PML coefficients
RA01 = RA[INDEX2D_R(0,i1)] - 1;
RB0 = RB[INDEX2D_R(0,i1)];
RE0 = RE[INDEX2D_R(0,i1)];
RF0 = RF[INDEX2D_R(0,i1)];
// Ey
materialEy = ID[INDEX4D_ID(1,ii,jj,kk)];
dHz = (Hz[INDEX3D_FIELDS(ii,jj,kk)] - Hz[INDEX3D_FIELDS(ii-1,jj,kk)]) / dx;
Ey[INDEX3D_FIELDS(ii,jj,kk)] = Ey[INDEX3D_FIELDS(ii,jj,kk)] - updatecoeffsE[INDEX2D_MAT(materialEy,4)] * (RA01 * dHz + RB0 * PHI1[INDEX4D_PHI1(0,i1,j1,k1)]);
PHI1[INDEX4D_PHI1(0,i1,j1,k1)] = RE0 * PHI1[INDEX4D_PHI1(0,i1,j1,k1)] - RF0 * dHz;
}
if (p2 == 0 && i2 < nx && j2 < ny && k2 < nz) {
// Subscripts for field arrays
ii = xf - i2;
jj = j2 + ys;
kk = k2 + zs;
// PML coefficients
RA01 = RA[INDEX2D_R(0,i2)] - 1;
RB0 = RB[INDEX2D_R(0,i2)];
RE0 = RE[INDEX2D_R(0,i2)];
RF0 = RF[INDEX2D_R(0,i2)];
// Ez
materialEz = ID[INDEX4D_ID(2,ii,jj,kk)];
dHy = (Hy[INDEX3D_FIELDS(ii,jj,kk)] - Hy[INDEX3D_FIELDS(ii-1,jj,kk)]) / dx;
Ez[INDEX3D_FIELDS(ii,jj,kk)] = Ez[INDEX3D_FIELDS(ii,jj,kk)] + updatecoeffsE[INDEX2D_MAT(materialEz,4)] * (RA01 * dHy + RB0 * PHI2[INDEX4D_PHI2(0,i2,j2,k2)]);
PHI2[INDEX4D_PHI2(0,i2,j2,k2)] = RE0 * PHI2[INDEX4D_PHI2(0,i2,j2,k2)] - RF0 * dHy;
}
}
__global__ void order2_xminus(int xs, int xf, int ys, int yf, int zs, int zf, int NX_PHI1, int NY_PHI1, int NZ_PHI1, int NX_PHI2, int NY_PHI2, int NZ_PHI2, int NY_R, const unsigned int* __restrict__ ID, const $REAL* __restrict__ Ex, $REAL *Ey, $REAL *Ez, const $REAL* __restrict__ Hx, const $REAL* __restrict__ Hy, const $REAL* __restrict__ Hz, $REAL *PHI1, $REAL *PHI2, const $REAL* __restrict__ RA, const $REAL* __restrict__ RB, const $REAL* __restrict__ RE, const $REAL* __restrict__ RF, $REAL d) {
// This function updates the Ey and Ez field components for the xminus slab.
//
// Args:
// xs, xf, ys, yf, zs, zf: Cell coordinates of PML slab
// NX_PHI, NY_PHI, NZ_PHI, NY_R: Dimensions of PHI1, PHI2, and R PML arrays
// ID, E, H: Access to ID and field component arrays
// Phi, RA, RB, RE, RF: Access to PML electric coefficient arrays
// d: Spatial discretisation, e.g. dx, dy or dz
// Obtain the linear index corresponding to the current thread
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Convert the linear | index to subscripts for PML PHI1 (4D) arrays
int p1 = idx / (NX_PHI1 * NY_PHI1 * NZ_PHI1);
int i1 = (idx % (NX_PHI1 * NY_PHI1 * NZ_PHI1)) / (NY_PHI1 * NZ_PHI1);
int j1 = ((idx % (NX_PHI1 * NY_PHI1 * NZ_PHI1)) % (NY_PHI1 * NZ_PHI1)) / NZ_ | PHI1;
int k1 = ((idx % (NX_PHI1 * NY_PHI1 * NZ_PHI1)) % (NY_PHI1 * NZ_PHI1)) % NZ_PHI1;
// Convert the linear index to subscripts for PML PHI2 (4D) arrays
int p2 = idx / (NX_PHI2 * NY_PHI2 * NZ_PHI2);
int i2 = (idx % (NX_PHI2 * NY_PHI2 * NZ_PHI2)) / (NY_PHI2 * NZ_PHI2);
int j2 = ((idx % (NX_PHI2 * NY_PHI2 * NZ_PHI2)) % (NY_PHI2 * NZ_PHI2)) / NZ_PHI2;
int k2 = ((idx % (NX_PHI2 * NY_PHI2 * NZ_PHI2)) % (NY_PHI2 * NZ_PHI2)) % NZ_PHI2;
$REAL RA0, RB0, RE0, RF0, RA1, RB1, RE1, RF1, RA01, dHy, dHz;
$REAL dx = d;
int ii, jj, kk, materialEy, materialEz;
int nx = xf - xs;
int ny = yf - ys;
int nz = zf - zs;
if (p1 == 0 && i1 < nx && j1 < ny && k1 < nz) {
// Subscripts for field arrays
ii = xf - i1;
jj = j1 + ys;
kk = k1 + zs;
// PML coefficients
RA0 = RA[INDEX2D_R(0,i1)];
RB0 = RB[INDEX2D_R(0,i1)];
RE0 = RE[INDEX2D_R(0,i1)];
RF0 = RF[INDEX2D_R(0,i1)];
RA1 = RA[INDEX2D_R(1,i1)];
RB1 = RB[INDEX2D_R(1,i1)];
RE1 = RE[INDEX2D_R(1,i1)];
RF1 = RF[INDEX2D_R(1,i1)];
RA01 = RA[INDEX2D_R(0,i1)] * RA[INDEX2D_R(1,i1)] - 1;
// Ey
materialEy = ID[INDEX4D_ID(1,ii,jj,kk)];
dHz = (Hz[INDEX3D_FIELDS(ii,jj,kk)] - Hz[INDEX3D_FIELDS(ii-1,jj,kk)]) / dx;
Ey[INDEX3D_FIELDS(ii,jj,kk)] = Ey[INDEX3D_FIELDS(ii,jj,kk)] - updatecoeffsE[INDEX2D_MAT(materialEy,4)] * (RA01 * dHz + RA1 * RB0 * PHI1[INDEX4D_PHI1(0,i1,j1,k1)] + RB1 * PHI1[INDEX4D_PHI1(1,i1,j1,k1)]);
PHI1[INDEX4D_PHI1(1,i1,j1,k1)] = RE1 * PHI1[INDEX4D_PHI1(1,i1,j1,k1)] - RF1 * (RA0 * dHz + RB0 * PHI1[INDEX4D_PHI1(0,i1,j1,k1)]);
PHI1[INDEX4D_PHI1(0,i1,j1,k1)] = RE0 * PHI1[INDEX4D_PHI1(0,i1,j1,k1)] - RF0 * dHz;
}
if (p2 == 0 && i2 < nx && j2 < ny && k2 < nz) {
// |
Eric89GXL/scipy | scipy/sparse/linalg/isolve/lsmr.py | Python | bsd-3-clause | 15,128 | 0.000463 | """
Copyright (C) 2010 David Fong and Michael Saunders
LSMR uses an iterative method.
07 Jun 2010: Documentation updated
03 Jun 2010: First release version in Python
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsmr']
from numpy import zeros, infty, atleast_1d
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
from .lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
maxiter=None, show=False, x0=None):
"""Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
A is a rectangular matrix of dimension m-by-n, where all cases are
allowed: m = n, m > n, or m < n. B is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
Parameters
----------
A : {matrix, sparse matrix, ndarray, LinearOperator}
Matrix A in the linear system.
b : array_like, shape (m,)
Vector b in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - ( A )x||
||(0) (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization.
atol, btol : float, optional
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, lsmr terminates when ``norm(A^{T} r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
the final ``norm(r)`` should be accurate to about 6
digits. (The final x will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of A and B respectively. For example, if the entries
of `A` have 7 correct digits, set atol = 1e-7. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive.
maxiter : int, optional
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool, optional
Print iterations logs if ``show=True``.
x0 : array_like, shape (n,), optional
Initial guess of x, if None zeros are used.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
Least-square solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a
solution.
= 1 means x is an approximate solution to A*x = B,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^T (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
Notes
-----
.. versionadded:: 0.11.0
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
https://arxiv.org/abs/1006.0758
.. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lsmr
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution `[0, 0]`
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
0
>>> x
array([ 0., 0.])
The stopping code `istop=0` returned indicates that a vector of zeros was
found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> normr
4.440892098500627e-16
As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
limits. The given solution `[1., -1.]` obviously solves the equation. The
remaining return values include information about the number of iterations
(`itn=1`) and the remaining difference of left and right side of the solved
equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> normr
0.005773502691896255
`istop` indicates that the system is inconsistent and thus `x` is rather an
approximate solution to the corresponding least-squares problem. `normr`
contains the minimal distance that was found.
"""
A = aslinearoperator(A)
b = atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
msg = ('The exact solution is x = 0, or x = x0, if x0 was given ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
hdg1 = ' itn x(1) | norm r norm A''r'
| hdg2 = ' compatible LS norm A cond A'
pfreq = 20 # print frequency (for repeating the heading)
pcount = 0 # print counter
m, n = A.shape
# stores the num of singular values
minDim = min([m, n])
if maxiter is None:
maxiter = minDim
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print(' |
nathanbjenx/cairis | cairis/test/test_RiskLevelAPI.py | Python | apache-2.0 | 5,044 | 0.007534 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
from io import StringIO
import os
import json
import jsonpickle
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.mio.ModelImport import importModelFile
from cairis.tools.JsonConverter import json_deserialize
import os
__author__ = 'Shamal Faily'
class RiskLevelAPITests(CairisDaemonTestCase):
    """Exercises the /api/risk_level endpoints against the ACME Water exemplar.

    The expected levels (9, or 0 for the Night environment threat case) come
    from the imported exemplar model.
    """

    @classmethod
    def setUpClass(cls):
        importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/ACME_Water/ACME_Water.xml', 1, 'test')

    def setUp(self):
        self.logger = logging.getLogger(__name__)
        self.existing_asset_name = 'ICT PC'
        self.existing_threat_name = 'Password enumeration'

    def _check_risk_level(self, method, url, expected_level):
        # Shared GET + deserialize + assert sequence used by every test below
        # (the original repeated this block six times).
        self.logger.info('[%s] URL: %s', method, url)
        rv = self.app.get(url)
        if (sys.version_info > (3,)):
            responseData = rv.data.decode('utf-8')
        else:
            responseData = rv.data
        level = jsonpickle.decode(responseData)
        self.assertIsNotNone(level, 'No results after deserialization')
        self.assertIsInstance(level, int, 'The result is not an integer as expected')
        self.assertEqual(level, expected_level)

    def test_get_risk_level(self):
        method = 'test_get_risk_level'
        url = '/api/risk_level/asset/%s?session_id=test' % quote(self.existing_asset_name)
        self._check_risk_level(method, url, 9)

    def test_get_risk_level_by_environment(self):
        method = 'test_get_risk_level_by_environment'
        self._check_risk_level(method, '/api/risk_level/asset/ICT%20Application/environment/Day?session_id=test', 9)
        self._check_risk_level(method, '/api/risk_level/asset/ICT%20Application/environment/Night?session_id=test', 9)

    def test_get_risk_threat_level(self):
        # NOTE: the original logged the copy-pasted label 'test_get_risk_level'.
        method = 'test_get_risk_threat_level'
        url = '/api/risk_level/asset/threat_type/' + quote(self.existing_asset_name) + '/' + quote(self.existing_threat_name) + '?session_id=test'
        self._check_risk_level(method, url, 9)

    def test_get_risk_threat_level_by_environment(self):
        # NOTE: the original logged the copy-pasted label
        # 'test_get_risk_level_by_environment' without the 'threat' part.
        method = 'test_get_risk_threat_level_by_environment'
        self._check_risk_level(method, '/api/risk_level/asset/threat_type/ICT%20Application/Enumeration/environment/Day?session_id=test', 9)
        self._check_risk_level(method, '/api/risk_level/asset/threat_type/ICT%20Application/Enumeration/environment/Night?session_id=test', 0)
|
tuistudio/tuistudio.com | tuistudio/utils/filters.py | Python | apache-2.0 | 530 | 0.002 | # coding: utf-8
import datetime
def timesince(value):
    """Friendly time gap: how long ago `value` was, in Chinese.

    `value` is a naive datetime in the past; the result is the largest
    applicable unit ('N年前', 'N个月前', 'N天前', 'N小时前', 'N分钟前')
    or '刚刚' for under a minute.
    """
    now = datetime.datetime.now()
    delta = now - value
    if delta.days > 365:
        return '%d年前' % (delta.days / 365)
    if delta.days > 30:
        return '%d个月前' % (delta.days / 30)
    if delta.days > 0:
        return '%d天前' % delta.days
    # delta.seconds is the sub-day remainder (0..86399).
    if delta.seconds > 3600:
        return '%d小时前' % (delta.seconds / 3600)
    if delta.seconds > 60:
        return '%d分钟前' % (delta.seconds / 60)
    return '刚刚'
return '刚刚' |
codefisher/codefisher_apps | online_status/templatetags/online_status_tags.py | Python | mit | 2,065 | 0.009201 | import datetime
from django.conf import settings
from django import template
from django.core.cache import cache
from online_status.status import CACHE_USERS, CACHE_PREFIX_ANONYM_USER, TIME_OFFLINE, ONLY_LOGGED_USERS, status_for_user
from django.contrib.sessions.models import Session
register = template.Library()


@register.inclusion_tag('online_status/online_users_list.html')
def online_users(limit=None):
    """Renders the list of online users (authenticated only)."""
    return _online_user(limit, True)
@register.inclusion_tag('online_status/online_users_count.html')
def online_user_count(limit=None):
    """Renders counts of anonymous and authenticated online users."""
    data = _online_user(limit, False)
    return {'onlineanonymusers': len(data.get('onlineanonymusers')),
            'onlineusers': len(data.get('onlineusers')),}
def _online_user(limit, logged_only):
    """Collects online users, optionally including anonymous sessions.

    Returns a dict with 'onlineanonymusers' and 'onlineusers' lists, each
    truncated to `limit` entries when a limit is given. Anonymous users are
    only gathered when ONLY_LOGGED_USERS is off and `logged_only` is False.
    """
    onlineusers = cache.get(CACHE_USERS, [])
    onlineanonymusers = []
    if not ONLY_LOGGED_USERS and not logged_only:
        now = datetime.datetime.now()
        # Sessions still valid but last touched within the "online" window.
        sessions = Session.objects.filter(expire_date__gte = now + datetime.timedelta(0, settings.SESSION_COOKIE_AGE - TIME_OFFLINE)).values_list('session_key', flat = True)
        onlineanonymusers = [x for x in [cache.get(CACHE_PREFIX_ANONYM_USER % session_key, None) for session_key in sessions] if x is not None]
        # Restrict logged users to active statuses with a live session.
        onlineusers = [item for item in cache.get(CACHE_USERS, []) if item.status in (0, 1) and item.session in sessions]
    if onlineanonymusers and limit:
        onlineanonymusers = onlineanonymusers[:limit]
    if onlineusers and limit:
        onlineusers = onlineusers[:limit]
    return {'onlineanonymusers': onlineanonymusers,
            'onlineusers': onlineusers,}
@register.inclusion_tag('online_status/user_status.html')
def user_status(user):
    """Renders an OnlineStatus instance for User"""
    status = status_for_user(user)
    return {'onlinestatus': status,}
@register.inclusion_tag('online_status/user_status_tag.html')
def user_status_tag(user):
    """Renders a compact status tag for User."""
    status = status_for_user(user)
    return {'onlinestatus': status,}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.