repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
SF-Zhou/TinyDNN | tiny_dnn/net/__init__.py | Python | gpl-3.0 | 22 | 0 | from . | net import | Net
|
MungoRae/home-assistant | tests/components/config/test_init.py | Python | apache-2.0 | 1,851 | 0 | """Test config init."""
import asyncio
from unittest.mock import patch
import pytest
from homeassistant.const import EVENT_COMPO | NENT_LOADED
from homeassistant.setup import async_setup_component, ATTR_COMPONENT
from homeassistant.components import config
from tests.common import mock_http_component, mock_coro, mock_component
@pytest.fixture(autouse=True)
def stub_http(hass):
"""Stub the HTTP component."""
mock_http_component(hass)
@asyncio.coroutine
def test_config_setup(hass, loop):
"""Test it sets up hassbian."""
yield from async_setup_component(hass, 'config', {})
assert 'config' i | n hass.config.components
@asyncio.coroutine
def test_load_on_demand_already_loaded(hass, test_client):
"""Test getting suites."""
mock_component(hass, 'zwave')
with patch.object(config, 'SECTIONS', []), \
patch.object(config, 'ON_DEMAND', ['zwave']), \
patch('homeassistant.components.config.zwave.async_setup') as stp:
stp.return_value = mock_coro(True)
yield from async_setup_component(hass, 'config', {})
yield from hass.async_block_till_done()
assert 'config.zwave' in hass.config.components
assert stp.called
@asyncio.coroutine
def test_load_on_demand_on_load(hass, test_client):
"""Test getting suites."""
with patch.object(config, 'SECTIONS', []), \
patch.object(config, 'ON_DEMAND', ['zwave']):
yield from async_setup_component(hass, 'config', {})
assert 'config.zwave' not in hass.config.components
with patch('homeassistant.components.config.zwave.async_setup') as stp:
stp.return_value = mock_coro(True)
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: 'zwave'})
yield from hass.async_block_till_done()
assert 'config.zwave' in hass.config.components
assert stp.called
|
shucommon/little-routine | python/AI/tensorflow/MNIST_data/download.py | Python | gpl-3.0 | 745 | 0.001342 | import urllib.request
def download(path):
print('Beginning file download with urllib2...')
| url = []
url.append('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz')
url.append('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz')
url.append('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz')
url.append('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz')
name = []
name.append('train-images-idx3-ubyte.gz')
name.append('train-labels | -idx1-ubyte.gz')
name.append('t10k-images-idx3-ubyte.gz')
name.append('t10k-labels-idx1-ubyte.gz')
for i in range(4):
print("downloading " + name[i])
urllib.request.urlretrieve(url[i], path + '/' + name[i])
|
gaiusm/pge | examples/springs/catapult.py | Python | gpl-3.0 | 2,508 | 0.027113 | #!/usr/bin/env python3
import pge, sys
from pygame.locals import *
print("starting catapult")
# pge.batch ()
pge.interactive ()
t = pge.rgb (1.0/2.0, 2.0/3.0, 3.0/4.0)
wood_light = pge.rgb (166.0/256.0, 124.0/256.0, 54.0/256.0)
wood_dark = pge.rgb (76.0/256.0, 47.0/256.0, 0.0)
red = pge.rgb (1.0, 0.0, 0.0)
green = pge.rgb (0.0, 1.0, 0.0)
blue = pge.rgb (0.0, 0.0, 1.0)
blue_dark = pge.rgb (0.1, 0.1, 0.8)
steel = pge.rgb (0.5, 0.5, 0.5)
copper = pge.rgb (0.5, 0.3, 0.2)
gold = pge.rgb (0.8, 0.6, 0.15)
ball_size = 0.02
boarder = 0.01
white = pge.rgb (1.0, 1.0, 1.0)
gap = 0.01
captured = None
sides = []
yellow = pge.rgb (0.8, 0.6, 0.15)
fps_text = None
last_fps = 0
def myquit (e):
print("goodbye")
sys.exit (0)
def key_pressed (e):
if e.key == K_ESCAPE:
myquit (e)
def placeBoarders (thickness, color):
print("placeBoarders")
e1 = pge.box (0.0, 0.0, 1.0, thickness, color).fix ()
e2 = pge.box (0.0, 0.0, thickness, 1.0, color).fix ()
e3 = pge.box (1.0-thickness, 0.0, thickness, 1.0, color).fix ()
e4 = pge.box (0.0, 1.0-thickness, 1.0, thickness, color).fix ()
return e1, e2, e3, e4
def placeBall (kind, x, y, r):
return pge.circle (x, y, r, kind)
def snap_it (e, o):
global connection
connection.rm ()
o.rm ()
def drop_gb (e, o):
gb = placeBall (steel, 0.7, 0.92, 0.03).mass (2.0)
# pge.at_time (3.0, drop_gb)
def mouse_clicked (e):
global connection
mouse = pge.pyg_to_unit_coord (e.pos)
if e.button == 1:
# left mouse button clicked
spring_power = 100 | 0.0
damping = 10.0
snap_length = 0.1
projectile = placeBall (wood_dark, mouse[0], mouse[1], 0.03).mass (0.9)
bungee = pge.spring (connection, projectile, spring_power, damping, snap_length).draw (yellow, 0.002)
bungee.when (snap_length, snap_it)
def main ():
global gb, sides, connection
placeBoarders | (0.01, wood_dark)
connection = placeBall (wood_light, 0.75, 0.45, 0.01).fix ()
print("before run")
pge.record ()
pge.draw_collision (True, False)
pge.collision_colour (red)
pge.gravity ()
pge.dump_world ()
pge.slow_down (6.0) # slows down real time by a factor of
pge.register_handler (myquit, [QUIT])
pge.register_handler (key_pressed, [KEYDOWN])
pge.register_handler (mouse_clicked, [MOUSEBUTTONDOWN])
pge.display_set_mode ([1000, 1000])
pge.local_fps ()
pge.run (10.0)
pge.finish_record ()
print("before main()")
main ()
|
pkess/beets | test/_common.py | Python | mit | 10,605 | 0.000094 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some common functionality for beets' test cases."""
from __future__ import division, absolute_import, print_function
import time
import sys
import os
import tempfile
import shutil
import six
import unittest
from contextlib import contextmanager
# Mangle the search path to include the beets sources.
sys.path.insert(0, '..')
import beets.library # noqa: E402
from beets import importer, logging # noqa: E402
from beets.ui import commands # noqa: E402
from beets import util # noqa: E402
import beets # noqa: E402
# Make sure the development versions of the plugins are used
import beetsplug # noqa: E402
beetsplug.__path__ = [os.path.abspath(
os.path.join(__file__, '..', '..', 'beetsplug')
)]
# Test resources path.
RSRC = util.bytestring_path(os.path.join(os.path.dirname(__file__), 'rsrc'))
PLUGINPATH = os.path.join(os.path.dirname(__file__), 'rsrc', 'beetsplug')
# Propagate to root logger so nosetest can capture it
log = logging.getLogger('beets')
log.propagate = True
log.setLevel(logging.DEBUG)
# Dummy item creation.
_item_ident = 0
# OS feature test.
HAVE_SYMLINK = sys.platform != 'win32'
HAVE_HARDLINK = sys.platform != 'win32'
def item(lib=None):
global _item_ident
_item_ident += 1
i = beets.library.Item(
title=u'the title',
artist=u'the artist',
albumartist=u'the album artist',
album=u'the album',
genre=u'the genre',
lyricist=u'the lyricist',
composer=u'the composer',
arranger=u'the arranger',
grouping=u'the grouping',
year=1,
month=2,
day=3,
track=4,
tracktotal=5,
disc=6,
disctotal=7,
lyrics=u'the lyrics',
comments=u'the comments',
bpm=8,
comp=True,
path='somepath{0}'.format(_item_ident),
length=60.0,
bitrate=128000,
format='FLAC',
mb_trackid='someID-1',
mb_albumid='someID-2',
mb_artistid='someID-3',
mb_albumartistid='someID-4',
album_id=None,
)
if lib:
lib.add(i)
return i
_album_ident = 0
def album(lib=None):
global _item_ident
_item_ident += 1
i = beets.library.Album(
artpath=None,
albumartist=u'some album artist',
albumartist_sort=u'some sort album artist',
albumartist_credit=u'some album artist credit',
album=u'the album',
genre=u'the genre',
year=2014,
month=2,
day=5,
tracktotal=0,
disctotal=1,
comp=False,
mb_albumid='someID-1',
mb_albumartistid='someID-1'
)
if lib:
lib.add(i)
return i
# Dummy import session.
def import_session(lib=None, loghandler=None, paths=[], query=[], cli=False):
cls = commands.TerminalImportSession if cli else importer.ImportSession
return cls(lib, loghandler, paths, query)
class Assertions(object):
"""A mixin with additional unit test assertions."""
def assertExists(self, path): # noqa
self.assertTrue(os.path.exists(util.syspath(path)),
u'file does not exist: {!r}'.format(path))
def assertNotExists(self, path): # noqa
self.assertFalse(os.path.exists(util.syspath(path)),
u'file exists: {!r}'.format((path)))
def assert_equal_path(self, a, b):
"""Check that two paths are equal."""
self.assertEqual(util.normpath(a), util.normpath(b),
u'paths are not equal: {!r} and {!r}'.format(a, b))
# A test harness for all beets tests.
# Provides temporary, isolated configuration.
class TestCase(unittest.TestCase, Assertions):
"""A unittest.TestCase subclass that saves and restores beets'
global configuration. This allows tests to make temporary
modifications that will then be automatically removed when the test
completes. Also provides some additional assertion methods, a
temporary directory, and a DummyIO.
"""
def setUp(self):
# A "clean" source list including only the defaults.
beets.config.sources = []
beets.config.read(user=False, defaults=True)
# Direct paths to a temporary directory. Tests can also use this
# temporary directory.
self.temp_dir = util.bytestring_path(tempfile.mkdtemp())
beets.config['statefile'] = \
util.py3_path(os.path.join(self.temp_dir, b'state.pickle'))
beets.config['library'] = \
util.py3_path(os.path.join(self.temp_dir, b'library.db'))
beets.config['directory'] = \
util.py3_path(os.path.join(self.temp_dir, b'libdir'))
# Set $HOME, which is used by confit's `config_dir()` to create
# directories.
self._old_home = os.environ.get('HOME')
os.environ['HOME'] = util.py3_path(self.temp_dir)
# Initialize, but don't install, a DummyIO.
self.io = DummyIO()
def tearDown(self):
if os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir)
if self._old_home is None:
del os.environ['HOME']
else:
os.environ['HOME'] = self._old_home
self.io.restore()
beets.config.clear()
beets.config._materialized = False
| class LibTestCase(TestCase):
"""A test case that includes an in-memory library object (`lib`) and
an item added to the library (`i`).
"""
def setUp(self):
super(LibTestCase, self).setUp()
self.lib = beets.library.Library(':memory:')
self.i = item(self.lib)
def tearDown(self):
self.lib._connection().close()
super(LibTestCase, self).tearDown()
# Mock timing.
class Timecop(ob | ject):
"""Mocks the timing system (namely time() and sleep()) for testing.
Inspired by the Ruby timecop library.
"""
def __init__(self):
self.now = time.time()
def time(self):
return self.now
def sleep(self, amount):
self.now += amount
def install(self):
self.orig = {
'time': time.time,
'sleep': time.sleep,
}
time.time = self.time
time.sleep = self.sleep
def restore(self):
time.time = self.orig['time']
time.sleep = self.orig['sleep']
# Mock I/O.
class InputException(Exception):
def __init__(self, output=None):
self.output = output
def __str__(self):
msg = "Attempt to read with no input provided."
if self.output is not None:
msg += " Output: {!r}".format(self.output)
return msg
class DummyOut(object):
encoding = 'utf-8'
def __init__(self):
self.buf = []
def write(self, s):
self.buf.append(s)
def get(self):
if six.PY2:
return b''.join(self.buf)
else:
return ''.join(self.buf)
def flush(self):
self.clear()
def clear(self):
self.buf = []
class DummyIn(object):
encoding = 'utf-8'
def __init__(self, out=None):
self.buf = []
self.reads = 0
self.out = out
def add(self, s):
if six.PY2:
self.buf.append(s + b'\n')
else:
self.buf.append(s + '\n')
def readline(self):
if not self.buf:
if self.out:
raise InputException(self.out.get())
else:
raise InputException()
self.reads += 1
return self.buf.pop(0)
class DummyIO(object):
"""Mocks inp |
HEPData/hepdata-converter | hepdata_converter/parsers/yaml_parser.py | Python | gpl-2.0 | 4,770 | 0.003354 | import yaml
# We try to load using the CSafeLoader for speed improvements.
try:
from yaml import CSafeLoader as Loader
except ImportError: #pragma: no cover
from yaml import SafeLoader as Loader #pragma: no cover
from hepdata_validator import LATEST_SCHEMA_VERSION
from hepdata_validator.submission_file_validator import SubmissionFileValidator
from hepdata_validator.data_file_validator import DataFileValidator
from hepdata_converter.parsers import Parser, ParsedData, Table
import os, re
# Allow for a bug in PyYAML where numbers like 1e+04 are parsed as strings not as floats.
# See https://stackoverflow.com/a/30462009
# Try replacing PyYAML by ruamel.yaml in future?
Loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
class YAML(Parser):
help = 'Parses New HEPData YAML format. Input parameter should be path to ' \
'the directory where submission.yaml file ' \
'is present (or direct filepath to the submission.yaml file)'
def __init__(self, *args, **kwargs):
super(YAML, self).__init__(*args, **kwargs)
self.validator_schema_version = kwargs.get('validator_schema_version', LATEST_SCHEMA_VERSION)
def _pretty_print_errors(self, message_dict):
return ' '.join(
['%s: %s' % (key, ' | '.join([e.message for e in val])) for
key, val in list(message_dict.items())])
def parse(self, data_in, *args, **kwargs):
"""
:param data_in: path to submission.yaml
:param args:
:param kwargs:
:raise ValueError:
"""
if not os.path.exists(data_in):
raise ValueError("File / Directory does not exist: %s" % data_in)
if os.path.isdir(data_in):
submission_filepath = os.path.join(data_in, 'submission.yaml')
if not os.path.exists(submission_filepath):
submission_filepath = os.path.join(data_in, 'submission.yml')
if not os.path.exists(submission_filepath):
raise ValueError("No submission file in %s" % data_in)
data_in = submission_filepath
# first validate submission file:
with open | (data_in, 'r') as submission_file:
submission_data = list(yaml.load_all(submission_file, Loader=Loader))
if len(submission_data) == 0:
raise RuntimeError("Submission file (%s) is empty" % data_in)
submission_file_validator = SubmissionFileValidator(schema_version=self.validator_schema_version)
if not submission_file_validator.validate(file_ | path=data_in,
data=submission_data):
raise RuntimeError(
"Submission file (%s) did not pass validation: %s" %
(data_in, self._pretty_print_errors(
submission_file_validator.get_messages())))
metadata = {}
tables = []
# validator for table data
data_file_validator = DataFileValidator(schema_version=self.validator_schema_version)
index = 0
for i in range(0, len(submission_data)):
if not submission_data[i]: # empty YAML document
continue
if 'data_file' not in submission_data[i]:
metadata = submission_data[i] # information about whole submission
continue
table_filepath = os.path.join(os.path.dirname(data_in),
submission_data[i]['data_file'])
with open(table_filepath, 'r') as table_file:
if not os.path.exists(table_filepath):
raise ValueError(
"table file: %s does not exist" % table.data_file)
table_data = yaml.load(table_file, Loader=Loader)
if not data_file_validator.validate(data=table_data,
file_path=table_filepath):
raise RuntimeError(
"Data file (%s) did not pass validation: %s" %
(table_filepath, self._pretty_print_errors(
data_file_validator.get_messages())))
index = index + 1
table = Table(index=index, metadata=submission_data[i],
data=table_data)
tables.append(table)
return ParsedData(metadata, tables)
|
audetto/playlist_generator | generate.py | Python | gpl-3.0 | 2,088 | 0.001916 | import os
import sys
import traceback
VALID = ['.mp3', '.ogg', '.m4a']
ENCODING = 'utf-8'
M3U = '.m3u'
SKIP_FOLDERS = ['SCS.4DJ_', 'RECYCLE.BIN', 'System Volume Information']
def skipFolder(name):
for s in SKIP_FOLDERS:
if s in name:
return True # SKIP
return False # ACCEPT
def normalise(base, absolute):
relative = os.path.relpath(absolute, base)
relative = relative.replace('\\', '/')
relative = './' + relative
return relative
def removePlaylist(base):
# we are not recursing in here
filenames = next(os.walk(base))[2]
for file in filenames:
name, ext = os.path.splitext(file)
if ext == M3U:
absolute = os.path.join(base, file)
os.remove(absolute)
def createNewPlaylist(base):
name = os.path.basename(os.path.normpath(base))
playlist = os.path.join(base, name + M3U)
with open(playlist, 'w', encoding = ENCODING) as pl:
for path, dirs, files in os.walk(base):
for f in files:
name, ext = os.path.splitext(f)
if ext in VALID:
absolute = os.path.join(path, f)
relative = normalise(base, absolute)
pl.write(relative)
pl.write('\n')
print('New playlist written to: {}'.format(playlist))
def processFolder(base):
try:
removePlaylist(base)
createNewPlaylist(base)
except (PermissionError, UnicodeDecodeError, UnicodeEncodeError):
traceback.print_exc()
def main(): |
base = sys.argv[1]
for path, dirs, files in os.walk(base):
for dir in dirs:
| absolute = os.path.abspath(os.path.join(path, dir))
if skipFolder(dir):
print('Skipping {}'.format(absolute))
else:
print('Processing {}'.format(absolute))
# this will recurse inside
processFolder(absolute)
print()
# we only look at the immediate children
# they will be recursing inside
break
main()
|
ui/django-rq | django_rq/__init__.py | Python | mit | 151 | 0.019868 | VERSION = (2, 4, 1)
| from .decorators import job
from .queues import enqueue, get_connection, get_queue, get_scheduler
from .workers import get_worker | |
fqez/JdeRobot | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_param.py | Python | gpl-3.0 | 10,340 | 0.002611 | #!/usr/bin/env python
'''param command handling'''
import time, os, fnmatch
from pymavlink import mavutil, mavparm
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import mp_module
class ParamState:
'''this class is separated to make it possible to use the parameter
functions on a secondary connection'''
def __init__(self, mav_param, logdir, vehicle_name, parm_file):
self.mav_param_set = set()
self.mav_param_count = 0
self.param_period = mavutil.periodic_event(1)
self.fetch_one = 0
self.mav_param = mav_param
self.logdir = logdir
self.vehicle_name = vehicle_name
self.parm_file = parm_file
def handle_mavlink_packet(self, master, m):
'''handle an incoming mavlink packet'''
if m.get_type() == 'PARAM_VALUE':
param_id = "%.16s" % m.param_id
# Note: the xml specifies param_index is a uint16, so -1 in that field will show as 65535
# We accept both -1 and 65535 as 'unknown index' to future proof us against someday having that
# xml fixed.
if m.param_index != -1 and m.param_index != 65535 and m.param_index not in self.mav_param_set:
added_new_parameter = True
self.mav_param_set.add(m.param_index)
else:
added_new_parameter = False
if m.param_count != -1:
self.mav_param_count = m.param_count
self.mav_param[str(param_id)] = m.param_value
if self.fetch_one > 0:
self.fetch_one -= 1
print("%s = %f" % (param_id, m.param_value))
if added_new_parameter and len(self.mav_param_set) == m.param_count:
print("Received %u parameters" % m.param_count)
if self.logdir != None:
self.mav_param.save(os.path.join(self.logdir, self.parm_file), '*', verbose=True)
def fetch_check(self, master):
'''check for missing parameters periodically'''
if self.param_period.trigger():
if master is None:
return
if len(self.mav_param_set) == 0:
master.param_fetch_all()
elif self.mav_param_count != 0 and len(self.mav_param_set) != self.mav_param_count:
if master.time_since('PARAM_VALUE') >= 1:
diff = set(range(self.mav_param_count)).difference(self.mav_param_set)
count = 0
while len(diff) > 0 and count < 10:
idx = diff.pop()
master.param_fetch_one(idx)
count += 1
def param_help_download(self):
'''download XML files for parameters'''
import multiprocessing
files = []
for vehicle in ['APMrover2', 'ArduCopter', 'ArduPlane']:
url = 'http://autotest.diydrones.com/Parameters/%s/apm.pdef.xml' % vehicle
path = mp_util.dot_mavproxy("%s.xml" % vehicle)
files.append((url, path))
url = 'http://autotest.diydrones.com/%s-defaults.parm' % vehicle
path = mp_util.dot_mavproxy("%s-defaults.parm" % vehicle)
files.append((url, path))
try:
child = multiprocessing.Process(target=mp_util.download_files, args=(files,))
child.start()
except Exception as e:
print(e)
def param_help(self, args):
'''show help on a parameter'''
if len(args) == 0:
print("Usage: param help PARAMETER_NAME")
return
if self.vehicle_name is None:
print("Unknown vehicle type")
return
path = mp_util.dot_mavproxy("%s.xml" % self.vehicle_name)
if not os.path.exists(path):
print("Please run 'param download' first (vehicle_name=%s)" % self.vehicle_name)
return
xml = open(path).read()
from lxml import objectify
objectify.enable_recursive_str()
tree = objectify.fromstring(xml)
htree = {}
for p in tree.vehicles.parameters.param:
n = p.get('name').split(':')[1]
htree[n] = p
for lib in tree.libraries.parameters:
for p in lib.param:
n = p.get('name')
htree[n] = p
for h in args:
if h in htree:
help = htree[h]
print("%s: %s\n" % (h, help.get('humanName')))
print(help.get('documentation'))
try:
vchild = help.getchildren()[0]
print("\nValues: ")
for v in vchild.value:
print("\t%s : %s" % (v.get('code'), str(v)))
except Exception as e:
pass
else:
print("Parameter '%s' not found in documentation" % h)
def handle_command(self, master, mpstate, args):
'''handle parameter commands'''
param_wildcard = "*"
usage="Usage: param <fetch|set|show|load|preload|forceload|diff|download|help>"
if len(args) < 1:
print(usage)
return
if args[0] == "fetch":
if len(args) == 1:
master.param_fetch_all()
self.mav_param_set = set()
print("Requested parameter list")
else:
for p in self.mav_param.keys():
if fnmatch.fnmatch(p, args[1].upper()):
master.param_fetch_one(p)
self.fetch_one += 1
print("Requested parameter %s" % p)
elif args[0] == "save":
if len(args) < 2:
print("usage: param save <filename> [wildcard]")
return
if len(args) > 2:
param_wildcard = args[2]
e | lse:
param_wildcard = "*"
self.mav_param.sav | e(args[1], param_wildcard, verbose=True)
elif args[0] == "diff":
wildcard = '*'
if len(args) < 2 or args[1].find('*') != -1:
if self.vehicle_name is None:
print("Unknown vehicle type")
return
filename = mp_util.dot_mavproxy("%s-defaults.parm" % self.vehicle_name)
if not os.path.exists(filename):
print("Please run 'param download' first (vehicle_name=%s)" % self.vehicle_name)
return
if len(args) >= 2:
wildcard = args[1]
else:
filename = args[1]
if len(args) == 3:
wildcard = args[2]
print("%-16.16s %12.12s %12.12s" % ('Parameter', 'Defaults', 'Current'))
self.mav_param.diff(filename, wildcard=wildcard)
elif args[0] == "set":
if len(args) < 2:
print("Usage: param set PARMNAME VALUE")
return
if len(args) == 2:
self.mav_param.show(args[1])
return
param = args[1]
value = args[2]
if value.startswith('0x'):
value = int(value, base=16)
if not param.upper() in self.mav_param:
print("Unable to find parameter '%s'" % param)
return
self.mav_param.mavset(master, param.upper(), value, retries=3)
if (param.upper() == "WP_LOITER_RAD" or param.upper() == "LAND_BREAK_PATH"):
#need to redraw rally points
mpstate.module('rally').rallyloader.last_change = time.time()
#need to redraw loiter points
mpstate.module('wp').wploader.last_change = time.time()
elif args[0] == "load":
if len(args) < 2:
print("Usage: param load <filename> [wildcard]")
return
if len(args) > 2:
param_wildcard = args[2]
else:
param_wildcard = "*"
self.mav_param.load(args[1], param_wildcard, master)
elif args[0] == "preload":
if |
gmist/ctm-5studio | main/auth/twitter.py | Python | mit | 1,468 | 0.006812 | # coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
twitter_config = dict(
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='h | ttps://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/',
consumer_key=config.CONFIG_DB.twitter_consumer_key,
consumer_secret=config.CONFIG_DB.twitter_consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
)
twitter = auth.crea | te_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
response = twitter.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
user_db = retrieve_user_from_twitter(response)
return auth.signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def signin_twitter():
return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
auth_id = 'twitter_%s' % response['user_id']
user_db = model.User.get_by('auth_ids', auth_id)
return user_db or auth.create_user_db(
auth_id=auth_id,
name=response['screen_name'],
username=response['screen_name'],
)
|
django-school-management/ssms | ssms/common/facilities/hostel/apps.py | Python | lgpl-3.0 | 128 | 0 | from __future__ imp | ort unicode_literals
from django.app | s import AppConfig
class HostelConfig(AppConfig):
name = 'hostel'
|
RedhawkSDR/integration-gnuhawk | components/multiply_ff_2i/tests/test_multiply_ff_2i.py | Python | gpl-3.0 | 4,535 | 0.006615 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in multiply_ff_2i"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("ID | L:CF/Resource:1.0"), True)
self.assertEqual(self.spd.get_id(), self.comp.ref._get_identifier())
|
#######################################################################
# Simulate regular component startup
# Verify that initialize nor configure throw errors
self.comp.initialize()
configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False)
self.comp.configure(configureProps)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../multiply_ff_2i.spd.xml") # By default tests all implementations
|
edisonlz/fruit | web_project/base/site-packages/bitfield/models.py | Python | apache-2.0 | 9,420 | 0.001062 | from django.db.models import signals
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.fields import Field, BigIntegerField
from django.db.models.fields.subclassing import Creator
try:
from django.db.models.fields.subclassing import SubfieldBase
except ImportError:
# django 1.2
from django.db.models.fields.subclassing import LegacyConnection as SubfieldBase # NOQA
import six
from bitfield.forms import BitFormField
from bitfield.query import BitQueryLookupWrapper
from bitfield.types import BitHandler, Bit
# Count binary capacity. Truncate "0b" prefix from binary form.
# Twice faster than bin(i)[2:] or math.floor(math.log(i))
MAX_FLAG_COUNT = int(len(bin(BigIntegerField.MAX_BIGINT)) - 2)
class BitFieldFlags(object):
def __init__(self, flags):
if len(flags) > MAX_FLAG_COUNT:
raise ValueError('Too many flags')
self._flags = flags
def __repr__(self):
return repr(self._flags)
def __iter__(self):
for flag in self._flags:
yield flag
def __getattr__(self, key):
if key not in self._flags:
raise AttributeError
return Bit(self._flags.index(key))
def iteritems(self):
for flag in self._flags:
yield flag, Bit(self._flags.index(flag))
def iterkeys(self):
for flag in self._flags:
yield flag
def itervalues(self):
for flag in self._flags:
yield Bit(self._flags.index(flag))
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
class BitFieldCreator(Creator):
"""
Descriptor for BitFields. Checks to make sure that all flags of the
instance match the class. This is to handle the case when caching
an older version of the instance and a newer version of the class is
available (usually during deploys).
"""
def __get__(self, obj, type=None):
if obj is None:
return BitFieldFlags(self.field.flags)
retval = obj.__dict__[self.field.name]
if self.field.__class__ is BitField:
# Update flags from class in case they've changed.
retval._keys = self.field.flags
return retval
class BitFieldMeta(SubfieldBase):
    """
    Modified SubFieldBase to use our contribute_to_class method (instead of
    monkey-patching make_contrib). This uses our BitFieldCreator descriptor
    in place of the default.
    NOTE: If we find ourselves needing custom descriptors for fields, we could
    make this generic.
    """
    def __new__(cls, name, bases, attrs):
        # Replacement hook attached to every class this metaclass
        # produces: installs BitFieldCreator instead of the default
        # descriptor that SubfieldBase would set up.
        def contribute_to_class(self, cls, name):
            BigIntegerField.contribute_to_class(self, cls, name)
            setattr(cls, self.name, BitFieldCreator(self))
        new_class = super(BitFieldMeta, cls).__new__(cls, name, bases, attrs)
        new_class.contribute_to_class = contribute_to_class
        return new_class
class BitField(six.with_metaclass(BitFieldMeta, BigIntegerField)):
    """BigIntegerField whose integer value is interpreted as named bit
    flags.

    ``flags`` is either a sequence of names (optionally ``(name, label)``
    pairs) or a dict mapping bit positions to names. ``default`` may be a
    collection of flag names, which is folded into the corresponding
    integer mask.
    """

    def __init__(self, flags, default=None, *args, **kwargs):
        if isinstance(flags, dict):
            # Get only integer keys in correct range. Bug fix: this must
            # be a list, not a generator expression — a generator is
            # always truthy, so the emptiness check below could never
            # fire (and max() would then have consumed the generator).
            valid_keys = [k for k in flags.keys()
                          if isinstance(k, int) and (0 <= k < MAX_FLAG_COUNT)]
            if not valid_keys:
                raise ValueError('Wrong keys or empty dictionary')
            # Fill list with values from dict or with empty values
            flags = [flags.get(i, '') for i in range(max(valid_keys) + 1)]
        if len(flags) > MAX_FLAG_COUNT:
            raise ValueError('Too many flags')
        self._arg_flags = flags
        flags = list(flags)
        labels = []
        # Split (name, label) pairs; bare names double as their label.
        for num, flag in enumerate(flags):
            if isinstance(flag, (tuple, list)):
                flags[num] = flag[0]
                labels.append(flag[1])
            else:
                labels.append(flag)
        if isinstance(default, (list, tuple, set, frozenset)):
            # Fold a collection of flag names into a single bit mask.
            new_value = 0
            for flag in default:
                new_value |= Bit(flags.index(flag))
            default = new_value
        BigIntegerField.__init__(self, default=default, *args, **kwargs)
        self.flags = flags
        self.labels = labels

    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        from south.modelsinspector import introspector
        field_class = "django.db.models.fields.BigIntegerField"
        args, kwargs = introspector(self)
        return (field_class, args, kwargs)

    def formfield(self, form_class=BitFormField, **kwargs):
        """Expose the flags as (name, label) choices on the form field."""
        choices = [(k, self.labels[self.flags.index(k)]) for k in self.flags]
        return Field.formfield(self, form_class, choices=choices, **kwargs)

    def pre_save(self, instance, add):
        value = getattr(instance, self.attname)
        return value

    def get_prep_value(self, value):
        # Persist the raw integer mask; None stays None for NULL columns.
        if value is None:
            return None
        if isinstance(value, (BitHandler, Bit)):
            value = value.mask
        return int(value)

    # def get_db_prep_save(self, value, connection):
    #     if isinstance(value, Bit):
    #         return BitQuerySaveWrapper(self.model._meta.db_table, self.name, value)
    #     return super(BitField, self).get_db_prep_save(value, connection=connection)

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        # Unwrap expression wrappers so Bit lookups hit the custom SQL path.
        if isinstance(value, SQLEvaluator) and isinstance(value.expression, Bit):
            value = value.expression
        if isinstance(value, (BitHandler, Bit)):
            return BitQueryLookupWrapper(self.model._meta.db_table, self.db_column or self.name, value)
        return BigIntegerField.get_db_prep_lookup(self, lookup_type=lookup_type, value=value,
                                                  connection=connection, prepared=prepared)

    def get_prep_lookup(self, lookup_type, value):
        if isinstance(value, SQLEvaluator) and isinstance(value.expression, Bit):
            value = value.expression
        if isinstance(value, Bit):
            # Only exact matches make sense against a single bit.
            if lookup_type in ('exact',):
                return value
            raise TypeError('Lookup type %r not supported with `Bit` type.' % lookup_type)
        return BigIntegerField.get_prep_lookup(self, lookup_type, value)

    def to_python(self, value):
        if isinstance(value, Bit):
            value = value.mask
        if not isinstance(value, BitHandler):
            # Regression for #1425: fix bad data that was created resulting
            # in negative values for flags. Compute the value that would
            # have been visible ot the application to preserve compatibility.
            if isinstance(value, six.integer_types) and value < 0:
                new_value = 0
                for bit_number, _ in enumerate(self.flags):
                    new_value |= (value & (2 ** bit_number))
                value = new_value
            value = BitHandler(value, self.flags, self.labels)
        else:
            # Ensure flags are consistent for unpickling
            value._keys = self.flags
        return value

    def deconstruct(self):
        # Re-insert the original flags argument for migrations.
        name, path, args, kwargs = super(BitField, self).deconstruct()
        args.insert(0, self._arg_flags)
        return name, path, args, kwargs
class CompositeBitFieldWrapper(object):
def __init__(self, fields):
self.fields = fields
def __getattr__(self, attr):
if attr == 'fields':
return super(CompositeBitFieldWrapper, self).__getattr__(attr)
for field in self.fields:
if hasattr(field, attr):
return getattr(field, attr)
raise AttributeError('%s is not a valid flag' % attr)
def __hasattr__(self, attr):
if attr == 'fields':
return super(CompositeBitFieldWrapper, self).__hasattr__(attr)
for field in self.fields:
if hasattr(field, attr):
return True
return False
def __setattr__(self, attr, value):
if attr == 'fields':
super(CompositeBitFieldWr |
jimga150/HealthNet | HealthNet/prescriptions/apps.py | Python | mit | 101 | 0 | from django.apps import AppConfig |
class PerscriptionsConfig(AppConfig):
    """Django AppConfig for the prescriptions app.

    NOTE(review): the class name misspells "Prescriptions"; left as-is
    because renaming would break existing references to this config
    (e.g. in INSTALLED_APPS) — confirm before fixing.
    """
    name = 'prescriptions'
|
evancich/apm_motor | modules/waf/waflib/extras/pep8.py | Python | gpl-3.0 | 3,477 | 0.032787 | #! /usr/bin/env python
# encoding: utf-8
#
# written by Sylvain Rouquette, 2011
'''
Install pep8 module:
$ easy_install pep8
or
$ pip install pep8
To add the boost tool to the waf file:
$ ./waf-light --tools=compat15,pep8
or, if you have waf >= 1.6.2
$ ./waf update --files=pep8
Then add this to your wscript:
[at]extension('.py', 'wscript')
def run_pep8(self, node):
self.create_task('Pep8', node)
'''
import threading
from waflib import Task, Options
pep8 = __import__('pep8')
class Pep8(Task.Task):
    """Waf task that runs the pep8 checker over a single input file."""
    color = 'PINK'
    # Serialises the one-time initialisation of the global pep8.options.
    lock = threading.Lock()
    def check_options(self):
        """Populate the module-global ``pep8.options`` from waf's parsed
        options; later calls are no-ops once it is set."""
        if pep8.options:
            return
        pep8.options = Options.options
        pep8.options.prog = 'pep8'
        # pep8 expects these comma-separated strings pre-split to lists.
        excl = pep8.options.exclude.split(',')
        pep8.options.exclude = [s.rstrip('/') for s in excl]
        if pep8.options.filename:
            pep8.options.filename = pep8.options.filename.split(',')
        if pep8.options.select:
            pep8.options.select = pep8.options.select.split(',')
        else:
            pep8.options.select = []
        if pep8.options.ignore:
            pep8.options.ignore = pep8.options.ignore.split(',')
        elif pep8.options.select:
            # Ignore all checks which are not explicitly selected
            pep8.options.ignore = ['']
        elif pep8.options.testsuite or pep8.options.doctest:
            # For doctest and testsuite, all checks are required
            pep8.options.ignore = []
        else:
            # The default choice: ignore controversial checks
            pep8.options.ignore = pep8.DEFAULT_IGNORE.split(',')
        pep8.options.physical_checks = pep8.find_checks('physical_line')
        pep8.options.logical_checks = pep8.find_checks('logical_line')
        pep8.options.counters = dict.fromkeys(pep8.BENCHMARK_KEYS, 0)
        pep8.options.messages = {}
    def run(self):
        """Check the task's input file; any reported problem fails the
        task with -1."""
        with Pep8.lock:
            self.check_options()
        pep8.input_file(self.inputs[0].abspath())
        return 0 if not pep8.get_count() else -1
def options(opt):
    """Register pep8's command line flags on the waf option parser.

    The flags mirror pep8's own CLI so that ``Pep8.check_options`` can
    hand waf's parsed ``Options.options`` straight to the pep8 module.

    :param opt: waf options context
    """
    flag_table = [
        (('-q', '--quiet'),
         dict(default=0, action='count',
              help="report only file names, or nothing with -qq")),
        (('-r', '--repeat'),
         dict(action='store_true',
              help="show all occurrences of the same error")),
        (('--exclude',),
         dict(metavar='patterns',
              default=pep8.DEFAULT_EXCLUDE,
              help="exclude files or directories which match these "
                   "comma separated patterns (default: %s)" %
                   pep8.DEFAULT_EXCLUDE,
              dest='exclude')),
        (('--filename',),
         dict(metavar='patterns', default='*.py',
              help="when parsing directories, only check filenames "
                   "matching these comma separated patterns (default: "
                   "*.py)")),
        (('--select',),
         dict(metavar='errors', default='',
              help="select errors and warnings (e.g. E,W6)")),
        (('--ignore',),
         dict(metavar='errors', default='',
              help="skip errors and warnings (e.g. E4,W)")),
        (('--show-source',),
         dict(action='store_true',
              help="show source code for each error")),
        (('--show-pep8',),
         dict(action='store_true',
              help="show text of PEP 8 for each error")),
        (('--statistics',),
         dict(action='store_true',
              help="count errors and warnings")),
        (('--count',),
         dict(action='store_true',
              help="print total number of errors and warnings "
                   "to standard error and set exit code to 1 if "
                   "total is not null")),
        (('--benchmark',),
         dict(action='store_true',
              help="measure processing speed")),
        (('--testsuite',),
         dict(metavar='dir',
              help="run regression tests from dir")),
        (('--doctest',),
         dict(action='store_true',
              help="run doctest on myself")),
    ]
    # Registration order is preserved so --help output stays unchanged.
    for names, kwargs in flag_table:
        opt.add_option(*names, **kwargs)
|
coldnight/homu | homu/server.py | Python | mit | 27,593 | 0 | import hmac
import json
import urllib.parse
import subprocess
from .main import (
PullReqState,
parse_commands,
db_query,
INTERRUPTED_BY_HOMU_RE,
synchronize,
)
from . import utils
from . import gitlab
from .utils import lazy_debug
import jinja2
import requests
import pkg_resources
from bottle import (
get,
post,
run,
request,
redirect,
abort,
response,
)
from threading import Thread
import sys
import os
import traceback
from retrying import retry
import bottle
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024 * 10
class G:
    """Bag for module-level shared state. Attributes such as ``states``,
    ``repos``, ``cfg``, ``logger`` and ``tpls`` are read by the route
    handlers below; presumably attached during startup — not visible in
    this module chunk."""
    pass
# The single shared state instance used by every request handler.
g = G()
def find_state(sha):
    """Locate the pull-request state whose merge commit is *sha*.

    Returns a ``(state, repo_label)`` pair; raises ``ValueError`` when no
    tracked PR in any repository produced that merge SHA.
    """
    candidates = ((state, label)
                  for label, repo_states in g.states.items()
                  for state in repo_states.values()
                  if state.merge_sha == sha)
    for found in candidates:
        return found
    raise ValueError('Invalid SHA')
def get_repo(repo_label, repo_cfg):
    """Return the gitlab repository object for *repo_label*, fetching it
    on first use and sanity-checking owner/name against the config."""
    cached = g.repos[repo_label].gh
    if cached:
        repo = cached
    else:
        repo = g.gh.repository(repo_cfg['owner'], repo_cfg['name'])
        # NOTE(review): this replaces the whole cache entry with the raw
        # repository object (not its ``.gh`` attribute) — confirm against
        # whatever populates g.repos.
        g.repos[repo_label] = repo
    assert repo.owner.login == repo_cfg['owner']
    assert repo.name == repo_cfg['name']
    return repo
@get('/')
def index():
    """Render the landing page listing all configured repositories."""
    return g.tpls['index'].render(repos=[g.repos[label]
                                         for label in sorted(g.repos)])
@get('/queue/<repo_label:path>')
def queue(repo_label):
    """Render the merge-queue page.

    *repo_label* is ``all``, one configured repository label, or several
    labels joined with ``+``.
    """
    logger = g.logger.getChild('queue')
    lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))
    single_repo_closed = None
    if repo_label == 'all':
        labels = g.repos.keys()
        multiple = True
        repo_url = None
    else:
        labels = repo_label.split('+')
        multiple = len(labels) > 1
        # Tree-closed status is only surfaced for a single-repo view.
        if repo_label in g.repos and g.repos[repo_label].treeclosed >= 0:
            single_repo_closed = g.repos[repo_label].treeclosed
        repo_url = '{}/{}/{}'.format(
            g.cfg["gitlab"]["host"],
            g.cfg['repo'][repo_label]['owner'],
            g.cfg['repo'][repo_label]['name'])
    states = []
    for label in labels:
        try:
            states += g.states[label].values()
        except KeyError:
            abort(404, 'No such repository: {}'.format(label))
    pull_states = sorted(states)
    rows = []
    for state in pull_states:
        # A PR is greyed out when its repo is closed outright, or its
        # priority falls below the repo's treeclosed threshold.
        treeclosed = (single_repo_closed or
                      state.priority < g.repos[state.repo_label].treeclosed)
        status_ext = ''
        if state.try_:
            status_ext += ' (try)'
        if treeclosed:
            status_ext += ' [TREE CLOSED]'
        rows.append({
            'status': state.get_status(),
            'status_ext': status_ext,
            'priority': 'rollup' if state.rollup else state.priority,
            'url': '{}/{}/{}/merge_requests/{}'.format(
                g.cfg["gitlab"]["host"],
                state.owner,
                state.name,
                state.num
            ),
            'num': state.num,
            'approved_by': state.approved_by,
            'title': state.title,
            'head_ref': state.head_ref,
            'mergeable': ('yes' if state.mergeable is True else
                          'no' if state.mergeable is False else ''),
            'assignee': state.assignee,
            'repo_label': state.repo_label,
            'repo_url': '{}/{}/{}'.format(
                g.cfg["gitlab"]["host"],
                state.owner,
                state.name
            ),
            'greyed': "treeclosed" if treeclosed else "",
        })
    return g.tpls['queue'].render(
        repo_url=repo_url,
        repo_label=repo_label,
        treeclosed=single_repo_closed,
        states=rows,
        oauth_client_id=g.cfg['gitlab']['app_client_id'],
        total=len(pull_states),
        approved=len([x for x in pull_states if x.approved_by]),
        rolled_up=len([x for x in pull_states if x.rollup]),
        failed=len([x for x in pull_states if x.status == 'failure' or
                    x.status == 'error']),
        multiple=multiple,
    )
@get('/callback')
def callback():
    """GitLab OAuth callback: exchange the authorization code for a
    token, then dispatch to the command carried in the ``state`` query
    parameter (``rollup`` or ``synch``)."""
    logger = g.logger.getChild('callback')
    response.content_type = 'text/plain'
    code = request.query.code
    state = json.loads(request.query.state)
    lazy_debug(logger, lambda: 'state: {}'.format(state))
    oauth_url = '{}/login/oauth/access_token'.format(
        g.cfg["gitlab"]["host"],
    )
    try:
        res = requests.post(oauth_url, data={
            'client_id': g.cfg['gitlab']['app_client_id'],
            'client_secret': g.cfg['gitlab']['app_client_secret'],
            'code': code,
        })
    except Exception as ex:
        logger.warn('/callback encountered an error '
                    'during gitlab oauth callback')
        # probably related to https://gitlab.com/pycqa/flake8/issues/42
        lazy_debug(logger, lambda: 'gitlab.oauth callback err: {}'.format(ex)) # noqa
        abort(502, 'Bad Gateway')
    # Token comes back urlencoded (access_token=...), not as JSON.
    args = urllib.parse.parse_qs(res.text)
    token = args['access_token'][0]
    repo_label = state['repo_label']
    repo_cfg = g.repo_cfgs[repo_label]
    repo = get_repo(repo_label, repo_cfg)
    user_gh = gitlab.login(token)
    if state['cmd'] == 'rollup':
        return rollup(user_gh, state, repo_label, repo_cfg, repo)
    elif state['cmd'] == 'synch':
        return synch(user_gh, state, repo_label, repo_cfg, repo)
    else:
        abort(400, 'Invalid command')
def rollup(user_gh, state, repo_label, repo_cfg, repo):
    """Create a rollup PR merging all approved rollup-flagged PRs.

    Each candidate is merged into a rollup branch on the acting user's
    fork; a single PR against the base repository is then opened listing
    which merges succeeded and which failed.
    """
    user_repo = user_gh.repository(user_gh.user().login, repo.name)
    base_repo = user_gh.repository(repo.owner.login, repo.name)
    nums = state.get('nums', [])
    if nums:
        try:
            rollup_states = [g.states[repo_label][num] for num in nums]
        except KeyError as e:
            return 'Invalid PR number: {}'.format(e.args[0])
    else:
        rollup_states = [x for x in g.states[repo_label].values() if x.rollup]
    rollup_states = [x for x in rollup_states if x.approved_by]
    rollup_states.sort(key=lambda x: x.num)
    if not rollup_states:
        return 'No pull requests are marked as rollup'
    # All candidates must share the first candidate's base branch.
    base_ref = rollup_states[0].base_ref
    base_sha = repo.ref('heads/' + base_ref).object.sha
    gitlab.set_ref(
        user_repo,
        'heads/' + repo_cfg.get('branch', {}).get('rollup', 'rollup'),
        base_sha,
        force=True,
    )
    successes = []
    failures = []
    # NOTE(review): this loop variable shadows the ``state`` parameter;
    # the post-loop uses of ``state`` below refer to the *last* PR.
    for state in rollup_states:
        if base_ref != state.base_ref:
            failures.append(state.num)
            continue
        merge_msg = 'Rollup merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
            state.num,
            state.head_ref,
            state.approved_by,
            state.title,
            state.body,
        )
        try:
            rollup = repo_cfg.get('branch', {}).get('rollup', 'rollup')
            user_repo.merge(rollup, state.head_sha, merge_msg)
        except gitlab.CommonError as e:
            # 409 is a merge conflict; anything else is unexpected.
            if e.code != 409:
                raise
            failures.append(state.num)
        else:
            successes.append(state.num)
    title = 'Rollup of {} pull requests'.format(len(successes))
    body = '- Successful merges: {}\n\n- Failed merges: {}'.format(
        ', '.join('#{}'.format(x) for x in successes),
        ', '.join('#{}'.format(x) for x in failures),
    )
    try:
        rollup = repo_cfg.get('branch', {}).get('rollup', 'rollup')
        # ``state.base_ref`` here is the last iterated PR's base ref —
        # equal to ``base_ref`` for merged PRs, but not guaranteed if the
        # last candidate was skipped; see NOTE above.
        pull = base_repo.create_pull(
            title,
            state.base_ref,
            user_repo.owner.login + ':' + rollup,
            body,
        )
    except gitlab.CommonError as e:
        return e.response.text
    else:
        redirect(pull.html_url)
@post('/gitlab')
def gitlab_hook():
logger = g.logger.getChild('gitlab')
response.content_type = 'text/plain'
info = request.json
lazy_debug(logger, lambda: 'info: {}'.format(utils.remove_url_keys_from_json(info))) # noqa
try:
path = urllib.parse.urlparse(info["repository"]["homepage"]).path
repo_parts = path.split("/")[1:]
except KeyError:
repo_parts = info["project"]["path_with_namespace"].split("/")
owner = '/'.join(repo_parts[0:-1])
repo_name = repo_parts[-1]
repo |
mozilla/amo-validator | validator/unicodehelper.py | Python | bsd-3-clause | 1,360 | 0 | import codecs
import re
# Many thanks to nmaier for inspiration and code in this module
# Byte-order marks tried in order by decode(); UTF-32-LE precedes
# UTF-16-LE because its BOM begins with the same two bytes.
UNICODE_BOMS = [
    (codecs.BOM_UTF8, 'utf-8'),
    (codecs.BOM_UTF32_LE, 'utf-32-le'),
    (codecs.BOM_UTF32_BE, 'utf-32-be'),
    (codecs.BOM_UTF16_LE, 'utf-16-le'),
    (codecs.BOM_UTF16_BE, 'utf-16-be'),
]
# Fallback encodings tried (in order) after UTF-8 decoding fails.
COMMON_ENCODINGS = ('latin_1', 'utf-16')
# Matches any non-ASCII characters, and any unprintable characters in the
# 7-bit ASCII range. Accepts tab, return, newline, and any other character
# code above 0x20 which fits in 7 bits.
NON_ASCII_FILTER = re.compile(r'[^\t\r\n\x20-\x7f]+')
def decode(data):
    """
    Decode data employing some charset detection and including unicode BOM
    stripping.
    """
    if isinstance(data, unicode):
        return data
    # A recognised byte-order mark wins outright.
    for marker, bom_encoding in UNICODE_BOMS:
        if data.startswith(marker):
            return data[len(marker):].decode(bom_encoding, errors='ignore')
    # No BOM: try UTF-8 first, then the other likely encodings.
    for candidate in ('utf-8',) + COMMON_ENCODINGS:
        try:
            return data.decode(candidate)
        except UnicodeDecodeError:
            pass
    # Last resort: drop everything that is not printable 7-bit ASCII.
    return NON_ASCII_FILTER.sub('', data).decode('ascii', errors='replace')
|
keras-team/keras | keras/engine/training_distributed_v1.py | Python | apache-2.0 | 29,172 | 0.006822 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to distributed training."""
import tensorflow.compat.v2 as tf
# pylint: disable=protected-access
import numpy as np
from tensorflow.python.distribute import input_lib
from keras import backend
from keras import callbacks as cbks
from keras.distribute import distribute_coordinator_utils as dc
from keras.distribute import distributed_training_utils_v1 as dist_utils
from keras.engine import partial_batch_padding_handler as padding_util
from keras.engine import training_arrays_v1
from keras.engine import training_utils_v1
from keras.utils.generic_utils import Progbar
from keras.utils.mode_keys import ModeKeys
from tensorflow.python.platform import tf_logging as logging
def _per_replica_execution_function(model, mode):
  """Return (inputs, outputs, updates_op, session_kwargs) of the
  execution function that `model` builds for `mode` on this replica."""
  func = model._make_execution_function(mode)
  return func.inputs, func.outputs, func.updates_op, func.session_kwargs
def _build_model(strategy, model, mode, inputs, targets=None):
  """Build the distributed counterpart of `model` for `mode` under
  `strategy`: clone per replica when the model was compiled with
  distribution, otherwise build the distributed network in place."""
  if model._compile_distribution:
    dist_utils.clone_model_on_replicas(
        model, strategy, mode, inputs=inputs, targets=targets)
  else:
    dist_utils._build_distributed_network(model, strategy, mode, inputs,
                                          targets)
def _make_train_step_fn(model, mode, strategy, output_labels):
  """Create step fn.
  Args:
    model: a Keras Model instance.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
    strategy: a `tf.distribute.Strategy` instance.
    output_labels: the output labels for the step function.
  Returns:
    A step function to run by `tf.distribute.Strategy`.
  """
  def _step_fn(ctx, inputs):
    """A step fn that returns update ops."""
    # Inputs may arrive as a (features, targets) pair or features only.
    if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
      inputs, targets = inputs
    else:
      targets = None
    # When input feature is a dictionary of tensors, dictionary is flattended
    # to an array and passed as a model input. This results in input mismatch
    # when model input layer names are not sorted in alphabetical order as
    # `nest.flatten()`sorts dictionary elements by keys. As so, transform input
    # tensors into an array and order it along `model._feed_input_names`.
    if isinstance(inputs, dict):
      inputs = [inputs[input_name] for input_name in model._feed_input_names]
    _build_model(strategy, model, mode, inputs, targets)
    # Build the per-replica execution functions and merge their pieces
    # into one cross-replica Keras backend function.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = strategy.extended.call_for_each_replica(
         _per_replica_execution_function,
         args=(dist_utils.get_distributed_model(model, mode), mode))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = dist_utils.unwrap_values(strategy, grouped_inputs,
                                                  grouped_outputs,
                                                  grouped_updates,
                                                  grouped_session_args)
    combined_fn = backend.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_' + str(mode) + '_function',
        **all_session_args)
    for label, output in zip(output_labels, combined_fn.outputs):
      if label == 'loss':
        reduce_op = tf.distribute.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is temporary
        # workaround until new metrics are in place.
        reduce_op = tf.distribute.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)
    # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
    # feed_dict, session kwargs, run options, run_metadata for now. These should
    # be handled appropriately
    return combined_fn.updates_op
  return _step_fn
def experimental_tpu_fit_loop(model,
dataset,
epochs=100,
verbose=1,
callbacks=None,
initial_epoch=0,
steps_per_epoch=None,
val_dataset=None,
validation_steps=None,
validation_freq=1):
"""Fit loop for training with TPU tf.distribute.Strategy.
Args:
model: Keras Model instance.
dataset: Dataset that returns inputs and targets
epochs: Number of times to iterate over the data
verbose: Integer, Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
val_dataset: Dataset for validation data.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
validation_freq: Only relevant if validation data is provided. Integer or
`collections.abc.Container` instance (e.g. list, tuple, etc.). If an
integer, specifies how many training epochs to run before a new
validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
Returns:
Returns `None`.
Raises:
ValueError: in case of invalid arguments.
"""
mode = ModeKeys.TRAIN
current_strategy = model._distribution_strategy
iteration_value = min(steps_per_epoch,
current_strategy.extended.steps_per_run)
steps_per_run = backend.variable(
value=iteration_value,
dtype='int32',
name='steps_per_run')
# TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
iterator = dist_utils.get_iterator(dataset, current_strategy)
scope = dist_utils.distributed_scope(
strategy=current_strategy, learning_phase=1)
scope.__enter__()
out_labels = model.metrics_names or []
step_fn = _make_train_step_fn(model, ModeKeys.TRAIN, current_strategy,
out_labels)
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = tf.constant(1e7)
for m in model._get_training_eval_metrics():
tensor = m.result()
initial_loop_values[m.name] = tf.zeros(tensor.shape, tensor.dtype)
ctx = current_strategy.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=steps_per_run,
initial_loop_values=initial_loop_values)
train_op = ctx.run_op
output_tensors = ctx.last_step_outputs
do_validation = bool(validation_steps)
if model._compile_distribution:
dist_utils._copy_weights_to_distributed_model(model, mode)
ca | llbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=verbose,
count_mode='steps',
mode=mode)
# Calculate the steps each time on the device.
steps_to_run = | ([current_strategy.extended.steps_per_run] *
(steps_per_epoch //
current_strategy.extended. |
rayene/buildall | buildall/core.py | Python | mit | 4,402 | 0.000227 | import datetime
from pathlib import PosixPath as PythonPathClass
from subprocess import Popen as PythonPopenClass
# TODO: find a better value
END_OF_TIME = datetime.datetime(2100, 1, 1)  # sentinel: a rebuild happened / always newest
BEGINNING_OF_TIME = datetime.datetime(1970, 1, 1)  # sentinel: no target / oldest possible
class BaseTask:
    """Common behaviour shared by every node in the build graph.

    Subclasses supply ``build()`` (and usually ``target()`` /
    ``modification_time``); this base only handles debug printing,
    indentation bookkeeping and the up-to-date comparison.
    """

    # Class-level defaults; note _child_tasks is a shared list, but
    # Task.__lshift__ rebinds it per instance rather than mutating it.
    _indent_level = 0
    silent = False
    _child_tasks = []

    def __str__(self):
        return type(self).__name__

    def build(self, *args):
        """Hook: regenerate this task's target. Must be overridden."""
        raise NotImplementedError('You should implement your own build()')

    def target(self):
        """Hook: the artifact this task produces (None by default)."""
        return None

    def debug(self, msg):
        """Print *msg* prefixed with the task name, honouring ``silent``."""
        if self.silent:
            return
        prefix = '\t' * self._indent_level
        print(prefix + '<%s> ' % self + msg)

    def set_indent_level(self, level):
        self._indent_level = level

    def is_up_to_date(self, dependencies_modification_time):
        """Return True when the target is at least as new as its newest
        dependency; emits a debug line either way."""
        stale = self.modification_time < dependencies_modification_time
        if stale:
            self.debug('Target unsatisfied (%s). Will trigger the build !'
                       % self.modification_time)
            return False
        self.debug('Target is up-to-date')
        return True
class Task(BaseTask):
    """A buildable node: rebuilt when any child dependency is newer than
    its own target."""
    @property
    def modification_time(self):
        # Newest target mod-time; BEGINNING_OF_TIME when there is no
        # target, so anything newer forces consideration.
        mod_times = [target.modification_time for target in [self.target()] if
                     target is not None]
        if not mod_times:
            return BEGINNING_OF_TIME
        return max(mod_times)
    def make(self):
        """Recursively bring this task up to date.

        Returns END_OF_TIME when a rebuild happened (propagating the
        rebuild to ancestors), otherwise this task's modification time.
        """
        self.debug('')
        newest_dependency_mod_time = BEGINNING_OF_TIME
        build_params = []
        for child in self._child_tasks:
            child.set_indent_level(self._indent_level + 1)
            dependency_mod_time = child.make()
            newest_dependency_mod_time = max(newest_dependency_mod_time,
                                             dependency_mod_time)
            build_params.append(child.target())
        if newest_dependency_mod_time == END_OF_TIME:
            # self.debug('At least, one of the dependencies triggered the
            # build')
            self.build(*build_params)
            self.debug('Regeneration succeeded !')
            return END_OF_TIME
        # self.debug('Cannot decide based on dependencies. Checking targets')
        for target in [self.target()]:
            if target is None:
                continue
            target.set_indent_level(self._indent_level + 1)
            if not target.is_up_to_date(newest_dependency_mod_time):
                self.build(*build_params)
                self.debug('Regeneration succeeded !')
                return END_OF_TIME
        self.debug('Nothing to do !')
        return self.modification_time
    def __add__(self, other):
        # ``task1 + task2`` accumulates both into a TargetList.
        return TargetList() + self + other
    def __lshift__(self, other):
        # 'other' can be a task or a list of tasks
        try:
            iter(other)
            self._child_tasks = other
        except TypeError:
            self._child_tasks = [other]
        return self
class TargetList(list):
    """List of tasks supporting accumulation with ``+``.

    ``+`` mutates the list in place and returns it so additions can be
    chained (see ``Task.__add__``).
    """
    def __add__(self, other):
        if isinstance(other, BaseTask):
            self.append(other)
        else:
            # Bug fix: the new list returned by ``super().__add__(other)``
            # was previously discarded, so adding a plain iterable
            # silently added nothing. Extend in place to match the
            # append branch's in-place, return-self contract.
            self.extend(other)
        return self
class Path(PythonPathClass, BaseTask):
    """A filesystem path acting as its own build target/dependency."""
    def target(self):
        return self
    @property
    def modification_time(self):
        # Missing files report a date *before* BEGINNING_OF_TIME so they
        # always compare as out of date.
        if self.exists():
            mod_ts = self.stat().st_mtime_ns
            return datetime.datetime.fromtimestamp(mod_ts / 1000000000)
        return datetime.datetime(1969, 12, 31)
    def make(self):
        # NOTE(review): unlike modification_time, this stats without an
        # exists() guard — a missing dependency file raises here; confirm
        # that is the intended failure mode.
        mod_ts = self.stat().st_mtime_ns
        mod_dt = datetime.datetime.fromtimestamp(mod_ts / 1000000000)
        self.debug('Dependency file exists and its date is %s' % mod_dt)
        return mod_dt
class Popen(PythonPopenClass, BaseTask):
    """A subprocess acting as a task; exit status 0 means satisfied.
    ``wait()`` blocks until the process finishes."""
    def target(self):
        return self
    def __str__(self):
        return self.__class__.__name__ + ' - ' + str(self.args)
    @property
    def modification_time(self):
        # Success maps to END_OF_TIME ("always newest"); failure to a
        # pre-epoch date.
        if self.wait() == 0:
            return END_OF_TIME
        return datetime.datetime(1969, 12, 31)
    def make(self):
        if self.wait() == 0:
            self.debug('Dependency build exited with return code 0 '
                       '=> satisfied')
            return BEGINNING_OF_TIME
        self.debug('Dependency build exited with return code !=0 '
                   '=> Will trigger ancestors build methods')
        return END_OF_TIME
class BuildException(Exception):
    """Exception type for build errors raised by users of this module."""
    pass
michaelaye/vispy | examples/basics/visuals/arrows_quiver.py | Python | bsd-3-clause | 2,742 | 0.000365 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
This example shows how to use the `ArrowVisual` for a quiver plot
"""
from __future__ import division
import sys
import itertools
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.transforms import NullTransform
class Canvas(app.Canvas):
    """Quiver demo: a regular grid of arrows that rotate to point at the
    mouse cursor."""
    def __init__(self):
        app.Canvas.__init__(self, title="Quiver plot", keys="interactive",
                            size=(830, 430))
        self.arrow_length = 20
        self.grid_coords = None
        self.line_vertices = None
        self.last_mouse = (0, 0)
        self.generate_grid()
        self.visual = visuals.ArrowVisual(
            color='white',
            connect='segments',
            arrow_size=8
        )
        # Redraw whenever the visual's data changes.
        self.visual.events.update.connect(lambda evt: self.update())
        self.visual.transform = NullTransform()
        self.show()
    def generate_grid(self):
        """Lay out arrow anchor points on a 50-pixel grid, inset 25px."""
        num_cols = int(self.physical_size[0] / 50)
        num_rows = int(self.physical_size[1] / 50)
        coords = []
        # Generate grid
        for i, j in itertools.product(range(num_rows), range(num_cols)):
            x = 25 + (50 * j)
            y = 25 + (50 * i)
            coords.append((x, y))
        self.grid_coords = np.array(coords)
    def on_resize(self, event):
        # Rebuild the grid for the new size and re-aim the arrows at the
        # last known mouse position.
        self.generate_grid()
        self.rotate_arrows(np.array(self.last_mouse))
        vp = (0, 0, self.physical_size[0], self.physical_size[1])
        self.context.set_viewport(*vp)
        self.visual.transforms.configure(canvas=self, viewport=vp)
    def rotate_arrows(self, point_towards):
        """Re-aim every arrow so it points at *point_towards* (pixels)."""
        direction_vectors = (self.grid_coords - point_towards).astype(
            np.float32)
        # Normalise each direction vector to unit length.
        norms = np.sqrt(np.sum(direction_vectors**2, axis=-1))
        direction_vectors[:, 0] /= norms
        direction_vectors[:, 1] /= norms
        # Each arrow is a segment centred on its grid point, extending
        # half the arrow length either way along the direction.
        vertices = np.repeat(self.grid_coords, 2, axis=0)
        vertices[::2] = vertices[::2] + ((0.5 * self.arrow_length) *
                                         direction_vectors)
        vertices[1::2] = vertices[1::2] - ((0.5 * self.arrow_length) *
                                           direction_vectors)
        self.visual.set_data(
            pos=vertices,
            arrows=vertices.reshape((len(vertices)//2, 4)),
        )
    def on_mouse_move(self, event):
        self.last_mouse = event.pos
        self.rotate_arrows(np.array(event.pos))
    def on_draw(self, event):
        gloo.clear('black')
        self.visual.draw()
if __name__ == '__main__':
    win = Canvas()
    # Only start the event loop when not running interactively (python -i).
    if sys.flags.interactive != 1:
        app.run()
|
toenuff/treadmill | lib/python/treadmill/kafka/__init__.py | Python | apache-2.0 | 6,933 | 0 | """Treadmill Kafka API"""
import fnmatch
import logging
import os
import re
import socket
from .. import admin as tadmin
from .. import context
from .. import dnsutils
from .. import discovery
from .. import fs
from .. import zkutils
_LOGGER = logging.getLogger(__name__)
KAFKA_ZK_ROOT = 'kafka'
DEFAULT_KAFKA_DIR = '/var/tmp/kafka'
RUN_CLASS_SCRIPT_NAME = 'kafka_run_class'
KAFKA_APP_PATTERN = '*.kafka.*'
DEFAULT_BROKER_ENDPOINT_NAME = 'client'
def setup_env(kafka_dir=DEFAULT_KAFKA_DIR, with_data_dir=False, server=False):
    """Prepare the Kafka environment dict and on-disk directories.

    :param kafka_dir: base Kafka directory, default is
        kafka.DEFAULT_KAFKA_DIR
    :type kafka_dir: string
    :param with_data_dir: also create the data directory, default False
    :type with_data_dir: bool
    :param server: add server-mode JVM settings, default False
    :type server: bool
    :return: dict of environment variables for launching Kafka
    """
    env = {'APP_LOG': app_log(kafka_dir)}
    java_home = os.environ.get('JAVA_HOME')
    if java_home is not None:
        env['JAVA_HOME'] = java_home
    logdir = log_dir(kafka_dir)
    env['LOG_DIR'] = logdir
    fs.mkdir_safe(logdir)
    if with_data_dir:
        datadir = data_dir(kafka_dir)
        fs.mkdir_safe(datadir)
        env['DATA_DIR'] = datadir
    if server:
        env.update({
            'USE64BITJVM': '1',
            'KAFKA_HEAP_OPTS': '-Xmx4G -Xms4G',
            'JVM_ARGUMENTS': (
                '-XX:+UnlockCommercialFeatures '
                '-XX:+FlightRecorder -agentlib:'
                'jdwp=transport=dt_socket,'
                'server=y,address=8011,suspend=n'
            ),
        })
    return env
def log_dir(kafka_dir=DEFAULT_KAFKA_DIR):
    """Return the Kafka server-log directory (``<kafka_dir>/logs``).

    :param kafka_dir: base Kafka directory, default is
        kafka.DEFAULT_KAFKA_DIR
    :type kafka_dir: string
    """
    return os.path.join(kafka_dir, 'logs')
def app_log(kafka_dir=DEFAULT_KAFKA_DIR):
    """Return the full path of the Kafka application log file.

    :param kafka_dir: base Kafka directory, default is
        kafka.DEFAULT_KAFKA_DIR
    :type kafka_dir: string
    """
    # NOTE(review): this lives under 'log/' while log_dir() uses 'logs/'
    # — confirm the mismatch is intentional before "fixing" it.
    return os.path.join(kafka_dir, 'log', 'kafka.log')
def data_dir(kafka_dir=DEFAULT_KAFKA_DIR):
    """Return the Kafka data directory (``<kafka_dir>/data``).

    :param kafka_dir: base Kafka directory, default is
        kafka.DEFAULT_KAFKA_DIR
    :type kafka_dir: string
    """
    return os.path.join(kafka_dir, 'data')
def zk_instances_by_zkurl(zkurl, zkroot=KAFKA_ZK_ROOT):
    """Derive the Kafka Zookeeper connect string from a ZK URL.

    :param zkurl: the Zookeeper URL
    :type zkurl: string
    :param zkroot: the Zookeeper chroot for this Kafka
    :type zkroot: string
    """
    # Strip everything up to (and including) the credentials '@' marker,
    # leaving just the host:port list, then append the chroot.
    hostports = re.sub(r'^.*[@]', '', zkurl)
    zk_servers = os.path.join(hostports, zkroot)
    _LOGGER.debug('zk_servers: %s', zk_servers)
    return zk_servers
def get_replica(brokers):
    """Count the brokers that are currently reachable.

    See kafka.get_brokers() for more details on the arguments
    """
    return sum(1 for broker in brokers if _is_broker_up(broker))
def run_class_script():
    """Get the Kafka run class"""
    # Returns the bare script name ('kafka_run_class'), no path/extension.
    return RUN_CLASS_SCRIPT_NAME
def _get_kafka_endpoint(zkclient, app_pattern, endpoint, watcher_cb=None):
"""Get the Kafka client endpoint host and cell
:param zkclient: a zkclient
:type zkclient: kazoo.client
:param app_pattern: the Kafka broker app pattern, e.g. treadmlp.kafka-*
:type app_pattern: string
:param endpoint: the Kafka broker client endpoint name in the app, default
is DEFAULT_BROKER_ENDPOINT_NAME
:type endpoint: string
"""
app_discovery = discovery.Discovery(zkclient, app_pattern, endpoint)
if watcher_cb:
endpoints = app_discovery.get_endpoints_zk(watch_cb=watcher_cb)
else:
endpoints = app_discovery.get_endpoints()
return endpoints
def _is_broker_up(hostport):
"""Test whether a broker is up"""
try:
host, port = hostport.split(':')
timeout = 3
socket.create_connection((host, port), timeout)
return True
except socket.error:
pass
return False
def get_brokers(cellname, domain, zkclient, app_pattern=None,
endpoint=DEFAULT_BROKER_ENDPOINT_NAME,
watcher_cb=None):
"""Get the Kafka broker host and ports for the supplied cell
:param cellname: a cell
:type cellname: str
:param domain: Treadmill DNS domain
:type domain: str
:param zkclient: a ZZookeeper client
:type zkclient: kazoo.client
:param app_pattern: the Kafka broker app pattern, e.g. treadmlp.kafka.*
:type app_pattern: string
:param endpoint: the Kafka broker client endpoint name in the app, default
is | DEFAULT_BROKER_ENDPOINT_NAME
:type endpoint: string
:param watcher_cb: ZK watcher callback; if the endpoints change, call this.
Only valid if you set both app_pattern and endpoint
:type watcher_cb: func
"""
brokers = get_master_brokers(cellname, domain)
if brokers:
for hostport in brokers:
# if at least one broker is up, then we are good; the reason for
# this is that we could have DNS record | s setup but no Kafka broker
# servers running on the hosts.
if _is_broker_up(hostport):
return brokers
brokers = []
admin_cell = tadmin.Cell(context.GLOBAL.ldap.conn)
cell = admin_cell.get(cellname)
for master in cell.get('masters', []):
port = master.get('kafka-client-port')
if port is None:
continue
hostport = '{0}:{1}'.format(master['hostname'], port)
brokers.append(hostport)
if brokers:
return brokers
if app_pattern:
# TODO: pylint complains about:
# Redefinition of brokers type from list to set
# pylint: disable=R0204
brokers = _get_kafka_endpoint(zkclient, app_pattern, endpoint,
watcher_cb=watcher_cb)
if brokers:
return brokers
admin_app = tadmin.Application(context.GLOBAL.ldap.conn)
matched_apps = admin_app.list({'_id': KAFKA_APP_PATTERN})
_LOGGER.debug('matched_apps: %r', matched_apps)
kafka_apps = [app['_id'] for app in matched_apps]
for app in kafka_apps:
kbrokers = _get_kafka_endpoint(zkclient, app, endpoint)
if kbrokers:
brokers.extend(kbrokers)
return brokers
def get_master_brokers(cell, domain):
"""Get the masker Kafka brokers
:param cell: a cell
:type cell: str
:param broker_id: a specific broker id, else, all brokers are returned
:type broker_id: int
:returns: a list of host:ports
"""
label = '_kafka._tcp.{0}.{1}'.format(cell, domain)
return ['{0}:{1}'.format(host, port)
for (host, port, _prio, _weight) in dnsutils.srv(label)]
|
qilicun/python | python2/PyMOTW-1.132/PyMOTW/mailbox/mailbox_maildir_folders.py | Python | gpl-3.0 | 855 | 0.004678 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import mailbox
import os
def show_maildir(name):
os.system('find %s -print' % name)
mbox = mailbox.Maildir('Example')
print 'Before:', mbox.list_folders()
show_maildir('Example')
print
print '#' * 30
print
mbox.add_folder('subfolder')
print 'subfolder created:', mbox.list_folders()
show_maildir('Ex | ample')
subfolder = mbox.get_folder('subfolder')
print 'subfolder contents:', subfo | lder.list_folders()
print
print '#' * 30
print
subfolder.add_folder('second_level')
print 'second_level created:', subfolder.list_folders()
show_maildir('Example')
print
print '#' * 30
print
subfolder.remove_folder('second_level')
print 'second_level removed:', subfolder.list_folders()
show_maildir('Example') |
leouieda/tesseroids-original | cookbook/simple_tess/plot.py | Python | bsd-3-clause | 1,058 | 0.008507 | """
| Plot the columns of the output files
"""
import sys
import pylab
from mpl_toolkits.basemap import Basemap
# Set up a projection
bm = Basemap | (projection='ortho', lon_0=0, lat_0=0,
resolution='l', area_thresh=10000)
data = pylab.loadtxt(sys.argv[1], unpack=True)
shape = (int(sys.argv[2]), int(sys.argv[3]))
lon = pylab.reshape(data[0], shape)
lat = pylab.reshape(data[1], shape)
glon, glat = bm(lon, lat)
for i, value in enumerate(data[3:]):
value = pylab.reshape(value, shape)
pylab.figure(figsize=(4, 3))
pylab.title("Column %d" % (i + 4))
#bm.drawcoastlines()
#bm.fillcontinents(color='coral',lake_color='aqua')
#bm.drawmapboundary(fill_color='aqua')
#bm.drawmapboundary()
#bm.drawparallels(pylab.arange(-90.,120.,30.))
#bm.drawmeridians(pylab.arange(0.,420.,60.))
bm.bluemarble()
#bm.pcolor(glon, glat, value)
bm.contourf(glon, glat, value, 15)
pylab.colorbar()
#bm.contour(glon, glat, value, 15, linewidths=1.5)
#pylab.colorbar()
pylab.savefig('column%d.png' % (i + 4))
|
hugombarreto/credibility_allocation | visualizations/utility.py | Python | mit | 645 | 0.021773 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc
x = np.arange(-10, 10, 0.1)
y = np.minimum(x,2)
z = np.minimum(0,x+2)
fig, axes = plt.subplots(1, 2, sharey=True)
fig.set_size_inches(9, 3)
axes[0].plot(x,y)
axes[1].plot(x,z)
for ax in axe | s:
| ax.set_xlim([-4,5])
ax.set_ylim([-4,3])
ax.set_xlabel(u"Alocação ($o_i$)")
ax.set_ylabel('Utilidade ($u_i$)')
axes[0].set_title("$\\theta_i=2$")
axes[1].set_title('$\\theta_i=-2$')
plt.savefig('../plots/utility.pdf', bbox_inches='tight')
# plt.show()
|
spring-week-topos/cinder-week | cinder/image/glance.py | Python | apache-2.0 | 18,489 | 0.000216 | # Copyright 2010 OpenStack Foundation
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend"""
from __future__ import absolute_import
import copy
import itertools
import random
import shutil
import sys
import time
import glanceclient.exc
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
glance_opts = [
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of url schemes that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
]
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.import_opt('glance_api_version', 'cinder.common.config')
LOG = logging.getLogger(__name__)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, netloc, use_ssl)
:raises ValueError
"""
url = urlparse.urlparse(image_href)
netloc = url.netloc
image_id = url.path.split('/')[-1]
use_ssl = (url.scheme == 'https')
return (image_id, netloc, use_ssl)
def _crea | te_glance_client(context, netloc, use_ssl,
version=CONF.glance_api_version):
"""Instantiate a new glanceclient.Client object."""
if | version is None:
version = CONF.glance_api_version
params = {}
if use_ssl:
scheme = 'https'
# https specific params
params['insecure'] = CONF.glance_api_insecure
params['ssl_compression'] = CONF.glance_api_ssl_compression
else:
scheme = 'http'
if CONF.auth_strategy == 'keystone':
params['token'] = context.auth_token
if CONF.glance_request_timeout is not None:
params['timeout'] = CONF.glance_request_timeout
endpoint = '%s://%s' % (scheme, netloc)
return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers():
"""Return Iterable over shuffled api servers.
Shuffle a list of CONF.glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary.
"""
api_servers = []
for api_server in CONF.glance_api_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
url = urlparse.urlparse(api_server)
netloc = url.netloc
use_ssl = (url.scheme == 'https')
api_servers.append((netloc, use_ssl))
random.shuffle(api_servers)
return itertools.cycle(api_servers)
class GlanceClientWrapper(object):
"""Glance client wrapper class that implements retries."""
def __init__(self, context=None, netloc=None, use_ssl=False,
version=None):
if netloc is not None:
self.client = self._create_static_client(context,
netloc,
use_ssl, version)
else:
self.client = None
self.api_servers = None
self.version = version
def _create_static_client(self, context, netloc, use_ssl, version):
"""Create a client that we'll use for every call."""
self.netloc = netloc
self.use_ssl = use_ssl
self.version = version
return _create_glance_client(context,
self.netloc,
self.use_ssl, self.version)
def _create_onetime_client(self, context, version):
"""Create a client that will be used for one call."""
if self.api_servers is None:
self.api_servers = get_api_servers()
self.netloc, self.use_ssl = self.api_servers.next()
return _create_glance_client(context,
self.netloc,
self.use_ssl, version)
def call(self, context, method, *args, **kwargs):
"""Call a glance client method.
If we get a connection error,
retry the request according to CONF.glance_num_retries.
"""
version = self.version
if version in kwargs:
version = kwargs['version']
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
num_attempts = 1 + CONF.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:
return getattr(client.images, method)(*args, **kwargs)
except retry_excs as e:
netloc = self.netloc
extra = "retrying"
error_msg = _("Error contacting glance server "
"'%(netloc)s' for '%(method)s', "
"%(extra)s.") % {'netloc': netloc,
'method': method,
'extra': extra,
}
if attempt == num_attempts:
extra = 'done trying'
error_msg = _("Error contacting glance server "
"'%(netloc)s' for '%(method)s', "
"%(extra)s.") % {'netloc': netloc,
'method': method,
'extra': extra,
}
LOG.exception(error_msg)
raise exception.GlanceConnectionFailed(reason=e)
LOG.exception(error_msg)
time.sleep(1)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = self._extract_query_params(kwargs)
try:
images = self._client.call(context, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if self._is_image_available(context, image):
_images.append(self._translate_from_glance(image))
return _images
def _extract_query_params(self, params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'sort_key', 'sort_dir')
for param in accepted_params:
if param in params:
_params[param] = params.get(param)
# ensure filters is a dict
_params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
_params['filters'].setdefault('is_public', 'none')
return _params
def show(self, context, image_id):
"""Returns a dict with image data for the given opaque image id."""
try:
image = self._client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
|
xR86/ml-stuff | labs-python/gists/stub-graceful-shutdown.py | Python | mit | 581 | 0.003442 | import time
while True:
try:
time.sleep(1) # do something here
print '.',
except KeyboardInterrupt:
print '\nPausing... (Hit ENTER to continue | , type quit to exit.)'
try:
response = raw_input()
if response == ' | quit':
break
print 'Resuming...'
except KeyboardInterrupt:
print 'Resuming...'
continue
'''
import errno
try:
# do something
result = conn.recv(bufsize)
except socket.error as (code, msg):
if code != errno.EINTR:
raise
''' |
xinpuyuandu/awesome-python3-webapp | www/models.py | Python | gpl-3.0 | 1,617 | 0.005566 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time, uuid
from orm import Model, StringField, BooleanField, FloatField, TextField
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
class User(Model):
__table__ = 'users'
id = StringField(primary_key=T | rue, default=next_id, ddl='varchar(50)')
email = StringField(ddl='varchar(50)')
passwd = StringField(ddl='varchar(50)')
admin = BooleanField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(default=time.time)
def __init__(self, **kw):
super(User, self).__init__(**kw)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
user_id = StringFiel | d(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(200)')
content = TextField()
created_at = FloatField(default=time.time)
def __init__(self, **kw):
super(Blog, self).__init__(**kw)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
content = TextField()
created_at = FloatField(default=time.time)
def __init__(self, **kw):
super(Comment, self).__init__(**kw) |
mkli90/tekmate | setup.py | Python | gpl-2.0 | 765 | 0 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from setuptools import setup
setup(name="tekmate",
version="0.0.1",
description="Tekmate - Stargate-Based Point'n'Click",
author="Max",
p | ackages=['tekmate'],
license="GPLv3",
url="https://github.com/mkli90/tekmate",
package_data={
| 'tmz': ['LICENSE']
},
classifiers=[
"Development Status :: 1 - Planning",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 2.7",
"Topic :: Games/Entertainment",
"Topic :: Software Development :: Libraries",
],
)
|
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/route_filter.py | Python | mit | 2,621 | 0.000763 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteFilter(Resource):
"""Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list[~azure.mgmt.network.v2018_01_01.models.RouteFilt | erRule]
:param peerings: A collection of references to express route circuit
peerings.
:type peerings:
list[~azure.mgmt.network.v2018_01_01.models.ExpressRouteCircuitPeering]
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar etag: Gets a unique read-only string | that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RouteFilter, self).__init__(**kwargs)
self.rules = kwargs.get('rules', None)
self.peerings = kwargs.get('peerings', None)
self.provisioning_state = None
self.etag = None
|
ihartung/460-Lab1 | networks/network.py | Python | gpl-2.0 | 3,070 | 0.001303 | import re
import sys
sys.path.append('..')
from src.link import Link
from src.node import Node
class Network(object):
def __init__(self, config):
self.config = config
self.nodes = {}
self.address = 1
self.build()
def build(self):
state = 'network'
with open(self.config) as f:
for line in f.readlines():
if line.startswith('#'):
continue
if line == "\n":
state = 'links'
if state == 'network':
self.create_network(line)
elif state == 'links':
self.configure_link(line)
def create_network(self, line):
fields = line.split()
if len(fields) < 2:
return
start = self.get_node(fields[0])
for i in range(1, len(fields)):
end = self.get_node(fields[i])
l = Link(self.address, start, endpoint=end)
self.address += 1
start.add_link(l)
def configure_link(self, line):
fields = line.split()
if len(fields) < 3:
return
start = self.get_node(fields[0])
l = start.get_link(fields[1])
for i in range(2, len(fields)):
if fields[i].endswith("bps"):
self.set_bandwidth(l, fields[i])
if fields[i].endswith("ms"):
self.set_delay(l, fields[i])
if fields[i].endswith("seconds"):
self.set_delay(l, fields[i])
if fields[i].endswith("pkts"):
self.set_queue(l, fields[i])
if fields[i].endswith("loss"):
self.set_loss(l, fields[i])
def get_node(self, name):
if name not in self.nodes:
self.nodes[name] = Node(name)
return self.nodes[name]
def loss(self, loss):
for node in self.nodes.values():
for link in node.links:
link.loss = loss
def set_bandwidth(self, link, rate):
numeric_rate = self.convert(rate)
if rate.endswith("Gbps"):
link.bandwidth = numeric_rate * 1000000000
elif rate.endswith("Mbps"):
| link.bandwi | dth = numeric_rate * 1000000
elif rate.endswith("Kbps"):
link.bandwidth = numeric_rate * 1000
elif rate.endswith("bps"):
link.bandwidth = numeric_rate
def set_delay(self, link, delay):
numeric_delay = self.convert(delay)
if delay.endswith("ms"):
link.propagation = numeric_delay / 1000.0
if delay.endswith("seconds"):
link.propagation = numeric_delay
def set_queue(self, link, size):
numeric_size = self.convert(size)
if size.endswith("pkts"):
link.queue_size = numeric_size
def set_loss(self, link, loss):
numeric_loss = self.convert(loss)
if loss.endswith("loss"):
link.loss = numeric_loss
@staticmethod
def convert(value):
return float(re.sub("[^0-9.]", "", value))
|
jawilson/home-assistant | tests/components/myq/test_cover.py | Python | apache-2.0 | 1,603 | 0.001871 | """The scene tests for the myq platform."""
from homeassistant.const import STATE_CLOSED
from .util import async_init_integration
async def test | _create_covers(hass):
"""Test creation of covers."""
await async_init_integration(hass)
state = hass.states.get("cover.large_garage_door")
assert state.state == STATE_CLOSED
expected_attributes = {
"device_class": "garage",
"friendly_name": "Large Garage Door",
"supported_features": 3,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attrib | utes[key] for key in expected_attributes
)
state = hass.states.get("cover.small_garage_door")
assert state.state == STATE_CLOSED
expected_attributes = {
"device_class": "garage",
"friendly_name": "Small Garage Door",
"supported_features": 3,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("cover.gate")
assert state.state == STATE_CLOSED
expected_attributes = {
"device_class": "gate",
"friendly_name": "Gate",
"supported_features": 3,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
|
pausan/python-maven | mavenrepo.py | Python | mit | 9,533 | 0.021609 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import requests
import shutil
import xmltodict
from maven import Maven
from mavencoord import MavenCoord
from mavenversiondb import MavenVersionDb
import mavenversioncmp as mavenvercmp
import mavenparser
class MavenRepo:
""" Manages the dependencies and downloads of a maven repository
"""
OFFICIAL_REPO_URL = 'https://repo.maven.apache.org/maven2/'
def __init__ (
self,
url = OFFICIAL_REPO_URL,
versionDb = None,
cacheDir = '_maven-cache'
):
self._cacheDir = cacheDir
self._repoUrl = url
self._versionDb = MavenVersionDb ()
self._scheduledDownloads = {}
self._jdkVersion = Maven.DEFAULT_JDK_VERSION
if isinstance (versionDb, basestring):
self._versionDb = MavenVersionDb()
self._versionDb.parseFile (versionDb)
elif isinstance (versionDb, MavenVersionDb):
self._versionDb = versionDb
# prepare cache dir
if self._cacheDir and (not os.path.exists (self._cacheDir)):
os.makedirs (self._cacheDir)
return
def setJdkVersion (self, jdkVersion):
""" This version is used when resolving all maven objects. The value
specified here will be used by default when downloading items from
given repository.
"""
self._jdkVersion = jdkVersion
return
def cleanCache (self):
""" Cleans the complete cache directory. Please keep in mind that this
method is not thread safe.
"""
if not os.path.exists (self._cacheDir):
return
shutil.rmtree (self._cacheDir)
os.makedirs (self._cacheDir)
return
def getMetadataUrlFor (self, coord):
""" Returns metadata URL to get information about a package
"""
coord = MavenCoord (coord)
baseurl = "%(group)s/%(artifact)s/maven-metadata.xml" % {
'group' : '/'.join (coord.group.split('.')),
'artifact' : coord.artifact
}
return self._repoUrl.rstrip('/') + '/' + baseurl.lstrip('/')
def getBaseUrlFor (self, coord):
coord = MavenCoord (coord)
baseurl = "%(group)s/%(artifact)s/%(version)s/%(artifact)s-%(version)s" % {
'group' : '/'.join (coord.group.split('.')),
'artifact' : coord.artifact,
'version' : coord.version
}
return self._repoUrl.rstrip('/') + '/' + baseurl.lstrip('/')
def getJarUrlFor (self, coord):
""" Returns the URL for downloading the artifact for given coordinate
"""
return self.getBaseUrlFor (coord) + '.jar'
def getPomUrlFor (self, coord):
""" Returns the URL for downloading given coordinate
"""
return self.getBaseUrlFor (coord) + '.pom'
def resolveCoord (self, coord):
""" Resolve coordinate so it has group, artifact and version numbers
Returns a valid MavenCoord object or None if it could not figure out
a valid version.
"""
coord = MavenCoord (coord)
if coord.version:
return coord
newCoord = self._versionDb.find (coord)
if newCoord:
return newCoord
# finally, let's download latest version from metadata file
metadataUrl = self.getMetadataUrlFor (coord)
metadataString = self._download2string (metadataUrl)
if not metadataString:
# raise Exception ("Cannot find out the coord version for: %s" % coord.id)
return None
obj = xmltodict.parse (metadataString)
# get latest release version
metadata = obj.get ('metadata', {})
v | ersioning = metadata.get ('versio | ning', {})
lastReleaseVersion = versioning.get ('release', None)
if not lastReleaseVersion:
return None
coord.version = lastReleaseVersion
return coord
def fetchOne (self, coord):
""" Fetch maven file from coordinate
"""
coord = self.resolveCoord (coord)
if not coord:
return None
data = self._download2string (self.getPomUrlFor (coord))
if data:
return mavenparser.parseString (data)
return None
def fetchWithAncestors (self, coord):
""" Fetch maven file from coordinate
"""
maven = self.fetchOne (coord)
if not maven:
return None
parentCoord = maven.parent
while parentCoord and (not parentCoord.empty()):
mavenParent = self.fetchOne (parentCoord)
if (not mavenParent):
break
maven.merge (mavenParent)
parentCoord = mavenParent.parent
return maven
def fetchResolvedTree (self, coord, scope):
""" Recursively gets all the dependencies for given POM Coordinate
"""
assert isinstance (scope, basestring)
coord = self.resolveCoord (coord)
if not coord:
return None
return self._fetchTreeDeps (
coord,
scope,
downloadedItems = {},
exclusions = {}
)
def downloadUrl (self, downloadUrl):
""" Downloads given URL and saves the file in the cache dir, in case
the file is already there, it won't download the file.
Returns the path where the file is stored.
"""
jarFileName = downloadUrl.split('/')[-1]
destJarPath = os.path.join (self._cacheDir, jarFileName)
if os.path.exists (destJarPath):
return destJarPath
r = requests.get(downloadUrl, stream=True)
with open(destJarPath, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return destJarPath
def downloadArtifacts (self, coord, scope):
""" Resolves all dependencies for given coord and downloads all artifacts
"""
if isinstance(coord, list):
result = []
for c in coord:
result.extend (self.downloadArtifacts (c, scope))
return result
mavenObj = self.fetchResolvedTree (coord, scope)
if not mavenObj:
return []
result = []
for coord in [MavenCoord(coord)] + mavenObj.deps.getFlattenCoords():
normCoord = self._versionDb.findOrRegister (coord)
if normCoord.version and coord.version:
if mavenvercmp.compare (coord.version, normCoord.version) > 0:
print (
"WARNING: it seems you are downloading an outdated version (update your version DB):\n"
" - proposal: %s\n"
" - using: %s" % (coord, normCoord)
)
downloadUrl = self.getJarUrlFor (normCoord)
destJarPath = self.downloadUrl (downloadUrl)
result.append (destJarPath)
return result
def _fetchTreeDeps (self, coord, scope, downloadedItems, exclusions):
""" Downloads given coordinate and its dependencies recursively for given
scope. All downloaded dependencies will be added to downloadedItems to avoid
recursion.
For quick lookups exclusions is mapping the coord.name with the actual
coord excluded.
"""
# is maven object in cache downloadedItems ['<group:artifact>']
if coord.name in downloadedItems:
if (downloadedItems [coord.name].coord.id != coord.id):
if mavenvercmp.compare (
downloadedItems [coord.name].coord.id,
coord.id
) < 0:
# TODO: adjust versions versions
print "WARNING: expecting same coord id for package '%s' vs '%s'" % (coord.id, downloadedItems [coord.name].coord.id)
return downloadedItems [coord.name]
maven = self.fetchWithAncestors (coord)
if not maven:
return None
maven.resolve (jdkVersion = self._jdkVersion)
# TODO: handle provided
children = {}
for dep in maven.deps.getFlattenDeps(skipOptional = True):
if (dep.coord.scope != scope):
continue
if dep.coord.name in exclusions:
if dep.coord.isContained (exclusions [dep.coord.name]):
# exclude dep
continue
# build the new exclusion list based on current dep
newExclusions = exclusions.copy()
for exclusion in dep.exclusions:
newExclusions[exclusion.name] = exclusion
# fetch child with deps
mavenChild = self._fetchTreeDeps (
dep.coord,
scope,
downloadedItems,
newExclusions
)
if mavenChild:
mavenChild.resolve (jdkVersion = self._jdkVersion)
children [dep.coord.id] = mavenChild
# update dependencies of this element
for dep in maven.deps.root.deps:
if dep.coord.id not in children:
|
mchristopher/PokemonGo-DesktopMap | app/pylibs/win32/Cryptodome/SelfTest/Util/test_asn1.py | Python | mit | 29,700 | 0.007407 | #
# SelfTest/Util/test_asn.py: Self-test for the Cryptodome.Util.asn1 module
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""Self-tests for Cryptodome.Util.asn1"""
import unittest
from Cryptodome.Util.py3compat import *
from Cryptodome.Util.asn1 import (DerObject, DerSetOf, DerInteger,
DerBitString,
DerObjectId, DerNull, DerOctetString,
DerSequence)
class DerObjectTests(unittest.TestCase):
def testObjInit1(self):
# Fail with invalid tag format (must be 1 byte)
self.assertRaises(ValueError, DerObject, b('\x00\x99'))
# Fail with invalid implicit tag (must be <0x1F)
self.assertRaises(ValueError, DerObject, 0x1F)
# ------
def testObjEncode1(self):
# No payload
der = DerObject(b('\x02'))
self.assertEquals(der.encode(), b('\x02\x00'))
# Small payload (primitive)
| der.payload = b('\x45')
self.assertEquals(der.encode(), b('\x02\x01\x45'))
# Invariant
self.assertEquals(der.encode(), b('\x02\x01\x45'))
# Initialize with numerical tag
der = DerObject(0x04)
der.payload = b('\x45')
| self.assertEquals(der.encode(), b('\x04\x01\x45'))
# Initialize with constructed type
der = DerObject(b('\x10'), constructed=True)
self.assertEquals(der.encode(), b('\x30\x00'))
def testObjEncode2(self):
# Initialize with payload
der = DerObject(0x03, b('\x12\x12'))
self.assertEquals(der.encode(), b('\x03\x02\x12\x12'))
def testObjEncode3(self):
# Long payload
der = DerObject(b('\x10'))
der.payload = b("0")*128
self.assertEquals(der.encode(), b('\x10\x81\x80' + "0"*128))
def testObjEncode4(self):
# Implicit tags (constructed)
der = DerObject(0x10, implicit=1, constructed=True)
der.payload = b('ppll')
self.assertEquals(der.encode(), b('\xa1\x04ppll'))
# Implicit tags (primitive)
der = DerObject(0x02, implicit=0x1E, constructed=False)
der.payload = b('ppll')
self.assertEquals(der.encode(), b('\x9E\x04ppll'))
def testObjEncode5(self):
# Encode type with explicit tag
der = DerObject(0x10, explicit=5)
der.payload = b("xxll")
self.assertEqual(der.encode(), b("\xa5\x06\x10\x04xxll"))
# -----
def testObjDecode1(self):
# Decode short payload
der = DerObject(0x02)
der.decode(b('\x02\x02\x01\x02'))
self.assertEquals(der.payload, b("\x01\x02"))
self.assertEquals(der._tag_octet, 0x02)
def testObjDecode2(self):
# Decode long payload
der = DerObject(0x02)
der.decode(b('\x02\x81\x80' + "1"*128))
self.assertEquals(der.payload, b("1")*128)
self.assertEquals(der._tag_octet, 0x02)
def testObjDecode3(self):
# Decode payload with too much data gives error
der = DerObject(0x02)
self.assertRaises(ValueError, der.decode, b('\x02\x02\x01\x02\xFF'))
# Decode payload with too little data gives error
der = DerObject(0x02)
self.assertRaises(ValueError, der.decode, b('\x02\x02\x01'))
def testObjDecode4(self):
# Decode implicit tag (primitive)
der = DerObject(0x02, constructed=False, implicit=0xF)
self.assertRaises(ValueError, der.decode, b('\x02\x02\x01\x02'))
der.decode(b('\x8F\x01\x00'))
self.assertEquals(der.payload, b('\x00'))
# Decode implicit tag (constructed)
der = DerObject(0x02, constructed=True, implicit=0xF)
self.assertRaises(ValueError, der.decode, b('\x02\x02\x01\x02'))
der.decode(b('\xAF\x01\x00'))
self.assertEquals(der.payload, b('\x00'))
def testObjDecode5(self):
# Decode payload with unexpected tag gives error
der = DerObject(0x02)
self.assertRaises(ValueError, der.decode, b('\x03\x02\x01\x02'))
def testObjDecode6(self):
# Arbitrary DER object
der = DerObject()
der.decode(b('\x65\x01\x88'))
self.assertEquals(der._tag_octet, 0x65)
self.assertEquals(der.payload, b('\x88'))
def testObjDecode7(self):
# Decode explicit tag
der = DerObject(0x10, explicit=5)
der.decode(b("\xa5\x06\x10\x04xxll"))
self.assertEquals(der._tag_octet, 0x10)
self.assertEquals(der.payload, b('xxll'))
# Explicit tag may be 0
der = DerObject(0x10, explicit=0)
der.decode(b("\xa0\x06\x10\x04xxll"))
self.assertEquals(der._tag_octet, 0x10)
self.assertEquals(der.payload, b('xxll'))
def testObjDecode8(self):
# Verify that decode returns the object
der = DerObject(0x02)
self.assertEqual(der, der.decode(b('\x02\x02\x01\x02')))
class DerIntegerTests(unittest.TestCase):
def testInit1(self):
der = DerInteger(1)
self.assertEquals(der.encode(), b('\x02\x01\x01'))
def testEncode1(self):
# Single-byte integers
# Value 0
der = DerInteger(0)
self.assertEquals(der.encode(), b('\x02\x01\x00'))
# Value 1
der = DerInteger(1)
self.assertEquals(der.encode(), b('\x02\x01\x01'))
# Value 127
der = DerInteger(127)
self.assertEquals(der.encode(), b('\x02\x01\x7F'))
def testEncode2(self):
# Multi-byte integers
# Value 128
der = DerInteger(128)
self.assertEquals(der.encode(), b('\x02\x02\x00\x80'))
# Value 0x180
der = DerInteger(0x180L)
self.assertEquals(der.encode(), b('\x02\x02\x01\x80'))
# One very long integer
der = DerInteger(2L**2048)
self.assertEquals(der.encode(),
b('\x02\x82\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
|
Sylvaner/Mosquito | test/scripts/create_config_file.py | Python | gpl-2.0 | 1,564 | 0.033887 | #!/usr/bin/python3
##
# Mosquito Media Player.
# one line to give the program's name and an idea of what it does.
# Copyright (C) 2015 - Sylvain Dangin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##
import os
import sys
import json
# Open config file
try:
configFile = open(os.getcwd()+"/config/config.json" | )
config = json.load(configFile)
configFile.close()
except FileNotFoundError:
print("Config file not found.")
sys.exit(1)
# Open test config file for database
try:
configFile = open(os.getcwd()+"/test/config/config_test.json")
configTest = json | .load(configFile)
config["database"] = configTest["database"]
configFile.close()
except FileNotFoundError:
print("Config file not found.")
sys.exit(1)
tmpConfigFilePath = os.getcwd()+"/test/tmp/config.json"
tmpConfigFile = open(tmpConfigFilePath, 'w+')
json.dump(config, tmpConfigFile)
tmpConfigFile.close()
|
sid88in/incubator-airflow | airflow/migrations/versions/1507a7289a2f_create_is_encrypted.py | Python | apache-2.0 | 2,203 | 0 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""create is_encrypted
Revision ID: 1507a7289a2f
Revises: e3a246e0dc1
Create Date: 2015-08-18 18:57:51.927315
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = '1507a7289a2f'
down_revision = 'e3a246e0dc1'
branch_labels = None
de | pends_on = None
connectionhelper = sa.Table(
'connection',
sa.MetaData(),
sa.Column('id', sa.Integer, prim | ary_key=True),
sa.Column('is_encrypted')
)
def upgrade():
    """Add an ``is_encrypted`` boolean column to the ``connection`` table.
    Existing rows are backfilled with ``False``. The migration is a no-op
    when the column is already present (pre-Alembic installations).
    """
    # first check if the user already has this done. This should only be
    # true for users who are upgrading from a previous version of Airflow
    # that predates Alembic integration
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    # this will only be true if 'connection' already exists in the db,
    # but not if alembic created it in a previous migration
    if 'connection' in inspector.get_table_names():
        col_names = [c['name'] for c in inspector.get_columns('connection')]
        if 'is_encrypted' in col_names:
            return
    op.add_column(
        'connection',
        sa.Column('is_encrypted', sa.Boolean, unique=False, default=False))
    # Backfill: ``default=False`` above only applies to new INSERTs done via
    # SQLAlchemy, so set the flag explicitly on every pre-existing row.
    conn = op.get_bind()
    conn.execute(
        connectionhelper.update().values(is_encrypted=False)
    )
)
def downgrade():
    """Drop the ``is_encrypted`` column added by :func:`upgrade`."""
    op.drop_column('connection', 'is_encrypted')
|
botswana-harvard/ambition-subject | ambition_subject/admin/education_admin.py | Python | gpl-3.0 | 1,171 | 0 | from django.contrib import admin
from edc_model_admin import audit_fieldset_tuple
from ..admin_site import ambition_subject_admin
from ..forms import EducationForm
from ..models import Education
from .modeladmin_mixins import CrfModelAdminMixin
@admin.register(Education, si | te=ambition_subject_admin)
class EducationAdmin(CrfModelAdminMixin, admin.ModelAdmin):
form = EducationForm
additional_instructions = (
'The following questions refer to the educational background of '
'the patient.')
fieldsets = (
(None, {
'fields': [
'subject_visit',
'profession',
'education_years', |
'education_certificate',
'elementary',
'attendance_years',
'secondary',
'secondary_years',
'higher_education',
'higher_years',
'household_head']}
), audit_fieldset_tuple)
radio_fields = {
'household_head': admin.VERTICAL,
'elementary': admin.VERTICAL,
'secondary': admin.VERTICAL,
'higher_education': admin.VERTICAL}
|
marionleborgne/nupic.research | tests/layers/physical_objects_test.py | Python | agpl-3.0 | 4,977 | 0.005626 | # | ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the | terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from htmresearch.frameworks.layers.physical_objects import (
Sphere, Cylinder, Box, Cube
)
class PhysicalObjectsTest(unittest.TestCase):
  """Unit tests for physical objects."""
  # NOTE(review): uses ``xrange``, so this test module targets Python 2.
  def testInitParams(self):
    """Simple construction test."""
    sphere = Sphere(radius=5, dimension=6)
    cylinder = Cylinder(height=50, radius=100, epsilon=5)
    box = Box(dimensions=[1, 2, 3, 4], dimension=4)
    cube = Cube(width=10, dimension=2)
    # constructor arguments must land in the public attributes, and an
    # omitted epsilon must fall back to the class default
    self.assertEqual(sphere.radius, 5)
    self.assertEqual(sphere.dimension, 6)
    self.assertEqual(sphere.epsilon, sphere.DEFAULT_EPSILON)
    self.assertEqual(cylinder.radius, 100)
    self.assertEqual(cylinder.height, 50)
    # a cylinder is always three-dimensional regardless of arguments
    self.assertEqual(cylinder.dimension, 3)
    self.assertEqual(cylinder.epsilon, 5)
    self.assertEqual(box.dimensions, [1, 2, 3, 4])
    self.assertEqual(box.dimension, 4)
    self.assertEqual(box.epsilon, box.DEFAULT_EPSILON)
    # a cube expands its width into one dimension per axis
    self.assertEqual(cube.dimensions, [10, 10])
    self.assertEqual(cube.width, 10)
    self.assertEqual(cube.dimension, 2)
    self.assertEqual(sphere.epsilon, cube.DEFAULT_EPSILON)
  def testSampleContains(self):
    """Samples points from the objects and test contains."""
    sphere = Sphere(radius=20, dimension=6)
    cylinder = Cylinder(height=50, radius=100, epsilon=2)
    box = Box(dimensions=[10, 20, 30, 40], dimension=4)
    cube = Cube(width=20, dimension=2)
    # every sampled location must satisfy contains()
    for i in xrange(50):
      self.assertTrue(sphere.contains(sphere.sampleLocation()))
      self.assertTrue(cylinder.contains(cylinder.sampleLocation()))
      self.assertTrue(box.contains(box.sampleLocation()))
      self.assertTrue(cube.contains(cube.sampleLocation()))
    # NOTE(review): interior points are expected NOT to be contained below,
    # which suggests contains() tests the surface shell — confirm upstream.
    # inside
    self.assertFalse(sphere.contains([1] * sphere.dimension))
    self.assertFalse(cube.contains([1] * cube.dimension))
    self.assertFalse(cylinder.contains([1] * cylinder.dimension))
    self.assertFalse(box.contains([1] * box.dimension))
    # outside
    self.assertFalse(sphere.contains([100] * sphere.dimension))
    self.assertFalse(cube.contains([100] * cube.dimension))
    self.assertFalse(cylinder.contains([100] * cylinder.dimension))
    self.assertFalse(box.contains([100] * box.dimension))
  def testPlotSampleLocations(self):
    """Samples points from objects and plots them in a 3D scatter."""
    objects = []
    objects.append(Sphere(radius=20, dimension=3))
    objects.append(Cylinder(height=50, radius=100, epsilon=2))
    objects.append(Box(dimensions=[10, 20, 30], dimension=3))
    objects.append(Cube(width=20, dimension=3))
    numPoints = 500
    for i in xrange(4):
      fig = plt.figure()
      ax = fig.add_subplot(111, projection='3d')
      for _ in xrange(numPoints):
        x, y, z = tuple(objects[i].sampleLocation())
        ax.scatter(x, y, z)
      ax.set_xlabel('X')
      ax.set_ylabel('Y')
      ax.set_zlabel('Z')
      plt.title("Sampled points from {}".format(objects[i]))
      # figures are written to the current working directory
      plt.savefig("object{}.png".format(str(i)))
      plt.close()
  def testPlotSampleFeatures(self):
    """Samples points from objects and plots them in a 3D scatter."""
    objects = []
    objects.append(Sphere(radius=20, dimension=3))
    objects.append(Cylinder(height=50, radius=100, epsilon=2))
    objects.append(Box(dimensions=[10, 20, 30], dimension=3))
    objects.append(Cube(width=20, dimension=3))
    numPoints = 500
    for i in xrange(4):
      # one scatter plot per named feature of each object
      for feature in objects[i].features:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        for _ in xrange(numPoints):
          x, y, z = tuple(objects[i].sampleLocationFromFeature(feature))
          ax.scatter(x, y, z)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        plt.title("Sampled points on {} from {}".format(feature, objects[i]))
        plt.savefig("object_{}_{}.png".format(str(i), feature))
        plt.close()
if __name__ == "__main__":
  unittest.main()
|
allisnone/pytrade | low_high33_backtest.py | Python | gpl-2.0 | 42,194 | 0.019829 | # -*- coding:utf-8 -*-
import tradeStrategy as tds
import sendEmail as se
import tradeTime as tt
import tushare as ts
import pdSql_common as pds
from pdSql import StockSQL
import numpy as np
import sys,datetime
from pydoc import describe
from multiprocessing import Pool
import os, time
import file_config as fc
from position_history_update import combine_file,CHINESE_DICT
from position_history_update import get_latest_yh_k_stocks_from_csv
def get_stop_trade_symbol():
    """Return codes of A-share stocks that are suspended today.
    A code counts as suspended when it exists in the locally exported code
    list but does not appear among today's traded (amount > 0) quotes.
    """
    today_df = ts.get_today_all()
    # keep only stocks that actually traded today
    today_df = today_df[today_df.amount>0]
    # NOTE(review): ``today_df_high_open`` is computed but never used.
    today_df_high_open = today_df[today_df.open>today_df.settlement*1.005]
    all_trade_code = today_df['code'].values.tolist()
    # codes known locally (exported by the broker client); hard-coded path
    all_a_code = pds.get_all_code(hist_dir="C:/中国银河证券海王星/T0002/export/")
    #all_a_code = pds.get_all_code(hist_dir="C:/hist/day/data/")
    all_stop_codes = list(set(all_a_code).difference(set(all_trade_code)))
    return all_stop_codes
def get_stopped_stocks(given_stocks=[],except_stocks=[],hist_dir='C:/hist/day/data/'):
    """Detect suspended stocks from live quotes.
    A stock is treated as stopped when its quote shows both ``ask1 == 0``
    and ``volume == 0``. Only codes that also have a local history file
    under *hist_dir* are considered.
    NOTE(review): the mutable list defaults (``given_stocks=[]`` etc.) are
    never mutated here, but converting them to ``None`` sentinels would be
    safer.
    Returns a ``(stop_stocks, all_codes)`` tuple; ``all_codes`` excludes
    *except_stocks*.
    """
    import easyquotation
    quotation =easyquotation.use('qq')
    stop_stocks = []
    # quote either the requested subset or the whole market
    if given_stocks:
        this_quotation = quotation.stocks(given_stocks)
    else:
        this_quotation = quotation.all
    all_stocks = list(this_quotation.keys())
    #print('all_stocks=',('150251' in all_stocks))
    #print('hist_dir=',hist_dir)
    exist_codes = pds.get_all_code(hist_dir)
    #print('exist_codes=',('150251' in exist_codes))
    #print('all_stocks=',all_stocks)
    # restrict to codes we both can quote and have local history for
    all_codes = list(set(all_stocks) & (set(exist_codes)))
    #print('all_codes=',all_codes)
    for stock_code in all_codes:
        if this_quotation[stock_code]:
            #print(this_quotation[stock_code])
            # no asks and no volume => trading is halted for this code
            if this_quotation[stock_code]['ask1']==0 and this_quotation[stock_code]['volume']==0:
                stop_stocks.append(stock_code)
        else:
            pass
    if except_stocks:
        all_codes = list(set(all_codes).difference(set(except_stocks)))
    #print('all_codes=',('150251' in all_codes))
    #print('stop_stocks=', stop_stocks)
    #print(len(stop_stocks))
    #print('all_stocks=',all_stocks)
    #print(len(all_stocks))
    return stop_stocks,all_codes
def get_exit_data(symbols,last_date_str):
    """Load local daily K-line CSVs for *symbols* plus the reference indexes.
    NOTE(review): this function looks unfinished — ``temp_datas`` and
    ``exit_price`` are assigned but never used, and every code path returns
    ``None``.
    """
    # always include the two reference index symbols
    refer_index = ['sh','cyb']
    symbols = symbols +refer_index
    temp_datas = {}
    for symbol in symbols:
        dest_df=pds.pd.read_csv('C:/hist/day/data/%s.csv' % symbol)
        print(dest_df)
        # dest_df = get_raw_hist_df(code_str=symbol)
        if dest_df.empty:
            pass
        else:
            dest_df_last_date = dest_df.tail(1).iloc[0]['date']
            # bail out as soon as one symbol's data is up to date
            if dest_df_last_date==last_date_str:
                exit_price = dest_df.tail(3)
                return
#get_exit_data(symbols=['000029'],last_date_str='2016/08/23')
#get_stopped_stocks()
def back_test_dapan(test_codes,k_num=0,source='yh',rate_to_confirm = 0.01,processor_id=0):
    """Run the 'dapan' high-open regression over *test_codes*.
    NOTE(review): references ``dapan_stocks`` and ``dapan_ho_df``, which are
    defined in neither this scope nor at module level in the visible file,
    so calling this raises NameError — it appears to be an unfinished
    extraction from back_test_stocks(). ``i`` is printed but never
    incremented.
    """
    i=0
    for stock_symbol in test_codes:
        # skip 000029 under 'easyhistory' (same guard as back_test_stocks —
        # presumably broken history data for that code)
        if stock_symbol=='000029' and source=='easyhistory':
            continue
        print(i,stock_symbol)
        s_stock=tds.Stockhistory(stock_symbol,'D',test_num=k_num,source=source,rate_to_confirm=rate_to_confirm)
        if s_stock.h_df.empty:
            print('New stock %s and no history data' % stock_symbol)
            continue
        if True:
            if dapan_stocks and (stock_symbol in dapan_stocks):
                # days where the open gaps up > 0.30 while the previous
                # day's 20-day position is still <= 1.0
                dapan_criteria = ((s_stock.temp_hist_df['o_change']> 0.30) & (s_stock.temp_hist_df['pos20'].shift(1)<=1.0))
                dapan_regress_column_type = 'open'
                dapan_high_o_df,dapan_high_open_columns = s_stock.regress_common(dapan_criteria,post_days=[0,-1,-2,-3,-4,-5,-10,-20,-60],regress_column = dapan_regress_column_type,
                                        base_column='open',fix_columns=['date','close','p_change','o_change','position','pos20','oo_chg','oh_chg','ol_chg','oc_chg'])
                dapan_high_o_df['code'] = stock_symbol
                # guard against division by a non-positive pos20
                dapan_high_o_df['ho_index'] = np.where(dapan_high_o_df['pos20']<=0,0,(dapan_high_o_df['o_change']/dapan_high_o_df['pos20']).round(2))
                dapan_ho_df= dapan_ho_df.append(dapan_high_o_df)
            else:
                pass
def back_test_stocks(test_codes,k_num=0,source='yh',rate_to_confirm = 0.01,processor_id=0,save_type='',
all_result_columns=[],trend_columns=[],all_temp_columns=[],deep_star_columns=[]):
i=0
ma_num = 20
regress_column_type = 'close'
all_result_df = tds.pd.DataFrame({}, columns=all_result_columns)
all_trend_result_df = tds.pd.DataFrame({}, columns=trend_columns)
all_temp_hist_df = tds.pd.DataFrame({}, columns=all_temp_columns)
#deep_star_columns = ['date','close','p_change','o_change','position','low_high_open','high_o_day0','high_o_day1','high_o_day3',
# 'high_o_day5','high_o_day10','high_o_day20','high_o_day50']
#deep_star_columns = []
deep_star_df = tds.pd.DataFrame({}, columns=deep_star_columns)
print('processor_id=%s : %s'% (processor_id, test_codes))
for stock_symbol in test_codes:
if stock_symbol=='000029' and source=='easyhistory':
continue
print('processor_id=%s :%s,%s' %(processor_id,i,stock_symbol))
s_stock=tds.Stockhistory(stock_symbol,'D',test_num=k_num,source=source,rate_to_confirm=rate_to_confirm)
if s_stock.h_df.empty:
print('New stock %s and no history data' % stock_symbol)
continue
if True:
#try:
result_df = s_stock.form_temp_df(stock_symbol)
test_result = s_stock.regression_test(rate_to_confirm)
recent_trend = s_stock.get_recent_trend(num=ma_num,column='close')
s_stock.diff_ma(ma=[10,30],target_column='close',win_num=5)
temp_hist_df = s_stock.temp_hist_df.set_index('date')
#temp_hist_df.to_csv('C:/hist/day/temp/%s.csv' % stock_symbol)
temp_hist_df_tail = temp_hist_df.tail(1)
temp_hist_df_tail['code'] = stock_symbol
all_temp_hist_df= all_temp_hist_df.append(temp_hist_df_tail)
#columns = ['close','p_change','o_change','position','low_high_open','high_o_day0','high_o_day1','high_o_day3','high_o_day5','high_o_day10','high_o_day20']
#high_o_df,high_open_columns = s_stock.regress_high_open(regress_column = regress_column_type,base_column='open')
#criteria = s_stock.temp_hist_df['low_high_open']!= 0
criteria = ((s_stock.temp_hist_df['star_l']> 0.50) & (s_stock.temp_hist_df['l_change']<-3.0) & (s_stock.temp_hist_df['pos20'].shift(1)<0.2))
high_o_df,high_open_columns = s_stock.regress_common(criteria,post_days=[0,-1,-2,-3,-4,-5,-10,-20,-60],regress_column = regress_column_type,
base_column='close',fix_columns=['date','close','p_change','o_change','position','pos20','MAX20high','star_l'])
high_o_df['code'] = stock_symbol
high_o_df['star_index'] = np.where(high_o_df['pos20']<=0,0,(high_o_df['star_l']/high_o_df['pos20']*((high_o_df['MAX20high']-high_o_df['close'])/high_o_df['MAX20high'])).round(2))
deep_star_df= deep_star_df.append(high_o_df)
i = i+1
if test_result.empty:
pass
else:
test_result_df = tds.pd.DataFrame(test_result.to_dict(), columns=all_result_columns, index=[stock_symbol])
all_result_df = all_result_df.append(test_result_df,ignore_index=False)
if recent_trend.empty:
pass
else:
trend_result_df = tds.pd.DataFrame(recent_trend.to_dict(), columns=trend_columns, index=[stock_symbol])
all_trend_result_df = all_trend_result_df.append(trend_result_df,ignore_index=False)
#except:
# print('Regression test exception for stock: %s' % stock_symbol)
if save_type=='csv': #write to csv
all_temp_hist_df_file_name = 'C:/work/temp1/all_temp_hist_%s' %processor_id +'.csv'
all_resu |
Foxugly/medagenda | patient/views.py | Python | gpl-3.0 | 1,514 | 0.000661 | # -*- coding: utf-8 -*-
#
# Copyright 2015, Foxugly. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
f | rom patient.models import Patient
from django.http import HttpResponse
import json
from agenda.models import Slot
from django.shortcuts import render
from django.shortcuts import get_object_or_404
def search_patient(request):
if request.is_ajax():
email = request.GET['email']
if len(email) > 5:
p = Patient.objects.filter(email=email)
if len(p):
return HttpResponse(json.d | umps({'return': True, 'patient': p[0].as_json()}))
else:
return HttpResponse(json.dumps({'return': False}))
else:
return HttpResponse(json.dumps({'return': False}))
def confirm_create(request, patient_id, text):
    """Activate a patient account from an e-mailed confirmation link.
    Resolves the patient by primary key and confirmation token (raises 404
    when the pair does not match), clears the token, marks the account
    active and renders the generic validation page.
    """
    patient = get_object_or_404(Patient, id=patient_id, confirm=text)
    if patient:
        patient.confirm = None
        patient.active = True
        patient.save()
    return render(request, 'valid.tpl')
def confirm_remove(request, patient_id, slot_id):
    """Cancel a patient's booking: free the agenda slot and confirm it.
    Raises 404 unless the slot exists and belongs to the given patient.
    """
    booked_slot = get_object_or_404(Slot, id=slot_id, patient__id=patient_id)
    # TODO SEND MAIL PATIENT_REMOVE_BOOKING
    booked_slot.clean_slot()
    booked_slot.save()
    return render(request, 'valid.tpl')
|
glic3rinu/basefs | basefs/tests/test_mount.py | Python | mit | 2,043 | 0.001468 | import shutil
import tempfile
import time
import os
import random
import subprocess
import unittest
from basefs.keys import Key
from basefs.logs import Log
from . import utils
class MountTests(unittest.TestCase):
def setUp(self):
__, self.logpath = tempfile.mkstemp()
__, self.logpath_b = tempfile.mkstemp()
self.addCleanup(os.remove, self.logpath)
self.addCleanup(os.remove, self.logpath_b)
__, self.keypath = tempfile.mkstemp()
self.addCleanup(os.remove, self.keypath)
self.port = random.randint(40000, 50000-1)
self.port_b = random.randint(50000, 60000)
log = Log(self.logpath)
root_key = Key.generate()
log.bootstrap([root_key], ['127.0.0.1:%i' % self.port])
root_key.save(self.keypath)
shutil.copy2(self.logpath, self.logpath_b)
self.hostname = utils.random_ascii(10)
self.hostname_b = utils.random_ascii(10)
self.mountpath = tempfile.mkdtemp()
self.mountpath_b = tempfile.mkdtemp()
context = {
'mountpath': self.mountpath,
'logpath': self.logpath,
'keypath': self.keypath,
'port': self.port,
'hostname': self.hostname,
}
cmd = 'basefs mount %(logpath)s %(mountpath)s -k %(keypath)s -p %(port)s -n %(hostname)s'
proc = subprocess.Popen(cmd % context, shell=True)
self.addCleanup(proc.kill)
time.sleep(1)
self.addCleanup(proc.kill)
context.update({
'mountpath': self.mountpath_b,
'logpath': self.logpath_b,
'port': self.port_b,
'hostname': self.hostname_b,
})
proc = subprocess.Popen(cmd % context, shell=True)
self.addCleanup(proc.kill)
self.addC | leanup(time.sleep, 1)
self.addCleanup(proc.kill)
self.addCleanup(shutil.rmtree, self.mountpath)
self.addCleanup(shutil.rmtree, self.mountpath_b)
| time.sleep(1)
def test_mount(self):
pass
|
NeptuneFramework/neptune | neptune/response.py | Python | apache-2.0 | 2,350 | 0.000426 | import os
import json
from jinja2 import Template
class NResponse(object):
"""
Main Handler for all HTTP Responses
"""
def __init__(self, http_version='', status='', headers={}, body=''):
self.http_version = http_version
self.headers = headers
self.status = status
self.body = body
self.response = ''
def encoded(self):
self.response = self._generate_response(self.http_version,
self.status,
| self.headers,
self.body) |
return self.response.encode()
def _generate_response(self, http_version, status, headers, body):
base = "{0} {1}\r\n".format(http_version, status)
for header in headers:
base += "{0}: {1}\r\n".format(header, headers[header])
# Also add custom headers (Server: Neptune)
base += "\r\n"
base += body
return base
def set_cookie(self, key, value):
self.headers.update({'Set-Cookie': '{0}={1}'.format(key, value)})
class HTTPResponse(NResponse):
    """HTTP response with sensible defaults.
    Defaults to ``HTTP/1.1`` and ``200 OK`` unless overridden via keyword
    arguments, and always advertises ``Server: Neptune``.
    """
    def __init__(self, body, **kwargs):
        # TODO: Defaults should be taken in a better way
        version = kwargs.get('http_version', 'HTTP/1.1')
        status = kwargs.get('status', '200 OK')
        headers = kwargs.get('headers', {})
        # note: intentionally updates the caller-supplied mapping in place
        headers.update({'Server': 'Neptune'})
        self.body = body
        self.http_version = version
        self.status = status
        self.headers = headers
        super().__init__(http_version=version, status=status,
                         headers=headers, body=body)
class JSONResponse(HTTPResponse):
    """HTTP response carrying a JSON-serialised payload."""
    def __init__(self, body):
        # TODO: Raise error if body can't be json dumped
        payload = json.dumps(body)
        super().__init__(payload, headers={'Content-Type': 'application/json'})
class HTMLResponse(HTTPResponse):
    """HTTP response that renders a Jinja2 template as ``text/html``.
    Templates are looked up under ``<cwd>/templates``.
    """
    def __init__(self, template, context=None):
        """Render *template* with *context* (a mapping, defaults to empty)."""
        # TODO: Raise error if template not found
        html_path = os.path.join(os.getcwd(), 'templates', template)
        # use a context manager so the file handle is closed deterministically
        # (the previous version leaked it until garbage collection)
        with open(html_path) as template_file:
            source = template_file.read()
        # ``context=None`` sentinel avoids the shared mutable-default pitfall
        html = Template(source).render(**(context or {}))
        super().__init__(html, headers={'Content-Type': 'text/html'})
|
argv0/cloudstack | tools/test/apisession.py | Python | apache-2.0 | 2,419 | 0.004134 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, | WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cookielib
import hashlib
import json
import os
import random
import sys
import urllib2
import urllib
class ApiSession:
"""an ApiSession represents one api session, with cookies."""
def | __init__(self, url, username, password):
self._username = username
self._password = hashlib.md5(password).hexdigest()
self._url = url
self._cj = cookielib.CookieJar()
self._opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj))
    def _get(self, parameters):
        """Issue a GET with *parameters* URL-encoded into the query string.
        Returns the raw response body on success; on an HTTP error the
        failure is printed and ``None`` is returned implicitly.
        """
        encoded = urllib.urlencode(parameters)
        url = "%s?%s"% (self._url, encoded)
        try:
            f = self._opener.open(url)
            return f.read()
        except urllib2.HTTPError as exn:
            # the error body is expected to be a JSON document
            print "Command %s failed" % parameters['command']
            print "Reason: %s" % json.loads(exn.read())
    def GET(self, parameters):
        """Send an authenticated GET request.
        Injects the session key and requests a JSON response before
        delegating to :meth:`_get`. Mutates *parameters* in place.
        """
        parameters['sessionkey'] = self._sessionkey
        parameters['response'] = 'json'
        return self._get(parameters)
    def _post(self, parameters):
        """POST *parameters* form-encoded to the API URL; return the body."""
        return self._opener.open(self._url, urllib.urlencode(parameters)).read()
    def POST(self, parameters):
        """Send an authenticated POST request.
        Injects the session key and requests a JSON response before
        delegating to :meth:`_post`. Mutates *parameters* in place.
        """
        parameters['sessionkey'] = self._sessionkey
        parameters['response'] = 'json'
        return self._post(parameters)
def login(self):
params = {'command':'login', 'response': 'json'}
params['username'] = self._username
params['password'] = self._password
result = self._get(params)
jsonresult = json.loads(result)
jsessionid = None
self._sessionkey = jsonresult['loginresponse']['sessionkey']
|
trooble/census_plot | demo1/demo1/__init__.py | Python | gpl-3.0 | 726 | 0.004132 | # Copyright 2012 Karim Sumun
#
# This file is part of Simple Census Plotter.
#
# S | imple Census Plotter is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURP | OSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
lndbrg/flowirc | flowirc/tests/test_IRCClientProtocol.py | Python | mit | 2,853 | 0.000701 | from unittest import TestCase
from unittest.mock import Mock, patch, call, MagicMock
from flowirc.protocol import IRCClientProtocol
__author__ = 'Olle Lundberg'
class TestIRCClientProtocol(TestCase):
def setUp(self):
self.proto = IRCClientProtocol()
self.transport = Mock()
self.proto.message_received = Mock()
def tearDown(self):
self.proto = None
self.transport = None
def test_connection_made(self):
self.proto.after_connection_made = Mock()
self.proto.connection_made(self.transport)
self.assertEqual(self.proto._transport, self.transport)
self.assertEqual(1, self.proto.after_connection_made.call_count)
self.assertEqual((), self.proto.after_connection_made.call_args)
def test_send(self):
self.proto._transport = Mock()
self.proto.send('foo')
self.proto._transport.write.assert_called_once_with(b'foo')
self.proto._transport.reset_mock()
calls = [call(b'foo'), call(b'bar'), call(b'baz')]
self.proto.send('foo', 'bar', 'baz')
self.assertEqual(3, self.proto._transport.write.call_count)
self.proto._transport.write.assert_has_calls(calls)
self.proto._transport.reset_mock()
data = Mock()
data.encode = Mock(side_effect=AttributeError(
"'NoneType' object has no attribute 'encode'"))
self.assertRaises(AttributeError, self.proto.send, data)
@patch('asyncio.Task')
@patch('flowirc.protocol.IRCMessage')
def test_data_received(self, ircmessage, task):
self.proto.message_received = Mock()
self.proto.data_received(b'')
self.proto.data_received(b'f')
self.assertEqual(0, task.call_count)
self.proto.data_received(b'foo')
self.assertEqual(1, ircmessage.from_str.call_count)
self.assertEqual(1, self.proto.message_received.call_count)
@patch('asyncio.Task')
@patch('flowirc.protocol.IRCMessage')
def test_data_received_2(self, ircmessage, task):
self.proto.message_received = Mock()
ping = "PING irc.example.net\r\n"
mock = MagicMock(return_value=ping)
ircmessage.from_str = mock
self.proto.data_received(b' \r\nPING :irc.example.n | et\r\n')
self.assertEqual(1, ircmessage.from_str.call_count)
self.proto.message_received.assert_called_once_with(ping)
@patch('asyncio.Task')
@patch('flowirc.protocol.IRCMessage')
def test_data_received_3(self, ircmessage, task):
self.proto.message_received = Mock()
mock = MagicMock(return_value=None)
ircmessage.from_str = mock
self.proto.data_received(b' \r\nNOT_A_CMD :irc.example.net\r\n')
se | lf.assertEqual(1, ircmessage.from_str.call_count)
self.assertEqual(0, self.proto.message_received.call_count)
|
kennedyshead/home-assistant | homeassistant/components/tasmota/fan.py | Python | apache-2.0 | 3,093 | 0.000647 | """Support for Tasmota fans."""
from hatasmota import const as tasmota_const
from homeassistant.components import fan
from homeassistant.components.fan import FanEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
)
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
ORDERED_NAMED_FAN_SPEEDS = [
tasmota_const.FAN_SPEED_LOW,
tasmota_const.FAN_SPEED_MEDIUM,
tasmota_const.FAN_SPEED_HIGH,
] # off is | not included
async def async_se | tup_entry(hass, config_entry, async_add_entities):
"""Set up Tasmota fan dynamically through discovery."""
@callback
def async_discover(tasmota_entity, discovery_hash):
"""Discover and add a Tasmota fan."""
async_add_entities(
[TasmotaFan(tasmota_entity=tasmota_entity, discovery_hash=discovery_hash)]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(fan.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(fan.DOMAIN),
async_discover,
)
class TasmotaFan(
    TasmotaAvailability,
    TasmotaDiscoveryUpdate,
    FanEntity,
):
    """Representation of a Tasmota fan.
    Maps Tasmota's discrete low/medium/high speeds onto Home Assistant's
    percentage-based fan model via ``ORDERED_NAMED_FAN_SPEEDS``.
    """
    def __init__(self, **kwds):
        """Initialize the Tasmota fan."""
        # Tasmota speed value; None until a state update arrives
        # (presumably set by the availability/discovery mixins — not shown here)
        self._state = None
        super().__init__(
            **kwds,
        )
    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        # three named speeds (low/medium/high); "off" is not counted
        return len(ORDERED_NAMED_FAN_SPEEDS)
    @property
    def percentage(self):
        """Return the current speed percentage.
        ``None`` means the state is unknown; ``0`` means the fan is off.
        """
        if self._state is None:
            return None
        if self._state == 0:
            return 0
        return ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS, self._state)
    @property
    def supported_features(self):
        """Flag supported features."""
        return fan.SUPPORT_SET_SPEED
    async def async_set_percentage(self, percentage):
        """Set the speed of the fan.
        0 turns the fan off; any other percentage is snapped to the
        closest of the three named Tasmota speeds.
        """
        if percentage == 0:
            await self.async_turn_off()
        else:
            tasmota_speed = percentage_to_ordered_list_item(
                ORDERED_NAMED_FAN_SPEEDS, percentage
            )
            self._tasmota_entity.set_speed(tasmota_speed)
    async def async_turn_on(
        self, speed=None, percentage=None, preset_mode=None, **kwargs
    ):
        """Turn the fan on, defaulting to medium speed."""
        # Tasmota does not support turning a fan on with implicit speed
        await self.async_set_percentage(
            percentage
            or ordered_list_item_to_percentage(
                ORDERED_NAMED_FAN_SPEEDS, tasmota_const.FAN_SPEED_MEDIUM
            )
        )
    async def async_turn_off(self, **kwargs):
        """Turn the fan off."""
        self._tasmota_entity.set_speed(tasmota_const.FAN_SPEED_OFF)
|
nexiles/odoo | addons/project_timesheet/project_timesheet.py | Python | agpl-3.0 | 15,625 | 0.00704 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
class project_project(osv.osv):
    _inherit = 'project.project'

    def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
        """On partner change, additionally default the invoicing factor to 100%."""
        res = super(project_project, self).onchange_partner_id(cr, uid, ids, part, context)
        if part and res and ('value' in res):
            # set Invoice Task Work to 100%
            model_data = self.pool.get('ir.model.data')
            xml_id = model_data._get_id(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
            if xml_id:
                res['value'].update({'to_invoice': model_data.browse(cr, uid, xml_id).res_id})
        return res

    _defaults = {
        'use_timesheets': True,
    }

    def open_timesheets(self, cr, uid, ids, context=None):
        """ open Timesheets view """
        model_data = self.pool.get('ir.model.data')
        action_pool = self.pool.get('ir.actions.act_window')
        project = self.browse(cr, uid, ids[0], context)
        view_context = {
            'search_default_account_id': [project.analytic_account_id.id],
            'default_account_id': project.analytic_account_id.id,
        }
        help_text = _("""<p class="oe_view_nocontent_create">Record your timesheets for the project '%s'.</p>""") % (project.name,)
        try:
            if project.to_invoice and project.partner_id:
                help_text += _("""<p>Timesheets on this project may be invoiced to %s, according to the terms defined in the contract.</p>""" ) % (project.partner_id.name,)
        except:
            # if the user do not have access rights on the partner
            pass
        ref = model_data.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')
        action_id = ref and ref[1] or False
        result = action_pool.read(cr, uid, [action_id], context=context)[0]
        result['name'] = _('Timesheets')
        result['context'] = view_context
        result['help'] = help_text
        return result
class project_work(osv.osv):
_inherit = "project.task.work"
def get_user_related_details(self, cr, uid, user_id):
res = {}
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', user_id)])
if not emp_id:
user_name = self.pool.get('res.users').read(cr, uid, [user_id], ['name'])[0]['name']
raise osv.except_osv(_('Bad Configuration!'),
_('Please define employee for user "%s". You must create one.')% (user_name,))
emp = emp_obj.browse(cr, uid, emp_id[0])
if not emp.product_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the HR Settings tab of the employee form.'))
if not emp.journal_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define journal on the related employee.\nFill in the timesheet tab of the employee form.'))
acc_id = emp.product_id.property_account_expense.id
if not acc_id:
acc_id = emp.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the timesheet tab of the employee form.'))
res['product_id'] = emp.product_id.id
res['journal_id'] = emp.journal_id.id
res['general_account_id'] = acc_id
res['product_uom_id'] = emp.product_id.uom_id.id
return res
def _create_analytic_entries(self, cr, uid, vals, context):
"""Create the hr analytic timesheet from project task work"""
timesheet_obj = self.pool['hr.analytic.timesheet']
task_obj = self.pool['project.task']
vals_line = {}
timeline_id = False
acc_id = False
task_obj = task_obj.browse(cr, uid, vals['task_id'], context=context)
result = self.get_user_related_details(cr, uid, vals.get('user_id', uid))
vals_line['name'] = '%s: %s' % (tools.ustr(task_obj.name), tools.ustr(vals['name'] or '/'))
vals_line['user_id'] = vals['user_id']
vals_line['product_id'] = result['product_id']
if vals.get('date'):
if len(vals['date']) > 10:
timestamp = datetime.datetime.strptime(vals['date'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
vals_line['date'] = ts.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
else:
vals_line['date'] = vals['date']
# Calculate quantity based on employee's product's uom
vals_line['unit_amount'] = vals['hours']
default_uom = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.project_time_mode_id.id
if result['product_uom_id'] != default_uom:
vals_line['unit_amount'] = self.pool['product.uom']._compute_qty(cr, uid, default_uom, vals['hours'], result['product_uom_id'])
acc_id = task_obj.project_id and task_obj.project_id.analytic_account_id.id or acc_id
if acc_id:
vals_line['account_id'] = acc_id
res = timesheet_obj.on_change_account_id(cr, uid, False, acc_id)
if res.get('value'):
vals_line.update(res['value'])
vals_line['general_account_id'] = result['general_account_id']
vals_line['journal_id'] = result['journal_id']
vals_line['amount'] = 0.0
vals_line['product_uom_id'] = result['product_uom_id']
amount = vals_line['unit_amount']
prod_id = vals_line['product_id']
unit = False
timeline_id = timesheet_obj.create(cr, uid, vals=vals_line, context=context)
# Compute based on pricetype
amount_unit = timesheet_obj.on_change_unit_amount(cr, uid, timeline_id | ,
prod_id, amount, False, unit, vals_line['journal_id'], context=context)
if amount_unit and 'amount' in amount_unit.get('value',{}):
updv = { 'amount': amount_unit['value']['amount'] }
| timesheet_obj.write(cr, uid, [timeline_id], updv, context=context)
return timeline_id
def create(self, cr, uid, vals, *args, **kwargs):
context = kwargs.get('context', {})
if not context.get('no_analytic_entry',False):
vals['hr_analytic_timesheet_id'] = self._create_analytic_entries(cr, uid, vals, context=context)
return super(project_work,self).create(cr, uid, vals, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
"""
When a project task work gets updated, handle its hr analytic timesheet.
"""
if context is None:
context = {}
timesheet_obj = self.pool.get('hr.analytic.timesheet')
uom_obj = self.pool.get('product.uom')
result = {}
if isinstance(ids, ( |
dnanexus/rseqc | rseqc/lib/qcmodule/annoGene.py | Python | gpl-3.0 | 9,403 | 0.064554 | import collections
from bx.intervals import *
from qcmodule import BED
'''Compare given bed entry to reference gene model'''
def getCDSExonFromFile(bedfile):
    '''Only Extract CDS exon regions from input bed file (must be 12-column).

    Returns a list of [chrom:strand, cds_exon_start, cds_exon_end] triplets;
    coordinates are 0-based half-open as in BED.  Header lines ("#...",
    "track", "browser") and blank lines are skipped, consistent with the
    sibling *FromFile helpers in this module.
    '''
    ret_lst = []
    with open(bedfile, 'r') as fh:          # 'with' closes the handle even on error
        for line in fh:
            # Tolerate UCSC-style headers instead of crashing on int().
            if line.startswith(('#', 'track', 'browser')) or not line.strip():
                continue
            f = line.strip().split()
            chrom = f[0] + ':' + f[5]       # output key is "chrom:strand"
            chrom_start = int(f[1])
            cdsStart = int(f[6])
            cdsEnd = int(f[7])
            blockSizes = [int(i) for i in f[10].strip(',').split(',')]
            blockStarts = [chrom_start + int(i) for i in f[11].strip(',').split(',')]
            # Clip each block to the CDS interval; blocks entirely outside
            # [cdsStart, cdsEnd] are dropped.
            for base, offset in zip(blockStarts, blockSizes):
                if (base + offset) < cdsStart:
                    continue
                if base > cdsEnd:
                    continue
                ret_lst.append([chrom, max(base, cdsStart), min(base + offset, cdsEnd)])
    return ret_lst
def getUTRExonFromFile(bedfile, utr=35):
    '''Only Extract UTR regions from input bed file (must be 12-column).

    Output rows are [chrom:strand, start, end] (6-column-bed style).
    When utr=35 [default], extract both 5' and 3' UTR.  When utr=3, only
    extract 3' UTR.  When utr=5, only extract 5' UTR.

    Note: list comprehensions replace the original chained lazy map()
    objects, which under Python 3 consumed each other mid-iteration and
    produced wrong results; behavior is identical under Python 2.
    '''
    ret_lst = []
    with open(bedfile, 'r') as fh:
        for line in fh:
            if line.startswith(('#', 'track', 'browser')):
                continue
            fields = line.rstrip('\r\n').split()
            if not fields:          # skip interior blank lines instead of IndexError
                continue
            chrom = fields[0] + ':' + fields[5]
            txStart = int(fields[1])
            cdsStart = int(fields[6])
            cdsEnd = int(fields[7])
            # Absolute exon coordinates (blockStarts are relative to txStart).
            exon_start = [txStart + int(i) for i in fields[11].rstrip(',').split(',')]
            exon_end = [st + int(size)
                        for st, size in zip(exon_start, fields[10].rstrip(',').split(','))]
            if utr in (35, 5):
                # 5' UTR: exon portions upstream of the CDS start.
                for st, end in zip(exon_start, exon_end):
                    if st < cdsStart:
                        ret_lst.append([chrom, st, min(end, cdsStart)])
            if utr in (35, 3):
                # 3' UTR: exon portions downstream of the CDS end.
                for st, end in zip(exon_start, exon_end):
                    if end > cdsEnd:
                        ret_lst.append([chrom, max(st, cdsEnd), end])
    return ret_lst
def getExonFromFile(bedfile):
    '''Extract ALL exon regions from input bed file (must be 12-column).

    Returns a list of [chrom:strand, exon_start, exon_end].  Malformed lines
    are reported on stderr and skipped.

    Fixes vs. original: the py2-only "print >>sys.stderr" statement (a
    SyntaxError under Python 3) is replaced by sys.stderr.write with the
    identical message, and the chained lazy map() objects (which consume
    each other under Python 3) are replaced by comprehensions.
    '''
    ret_lst = []
    with open(bedfile, 'r') as fh:
        for line in fh:
            if line.startswith(('#', 'track', 'browser')):
                continue
            try:
                fields = line.rstrip('\r\n').split()
                chrom = fields[0] + ':' + fields[5]
                txStart = int(fields[1])
                exon_start = [txStart + int(i) for i in fields[11].rstrip(',').split(',')]
                exon_end = [st + int(size)
                            for st, size in zip(exon_start, fields[10].rstrip(',').split(','))]
            except Exception:
                # Best-effort: report and skip malformed rows (message unchanged).
                sys.stderr.write("[NOTE:input bed must be 12-column] skipped this line: " + line)
                continue
            for st, end in zip(exon_start, exon_end):
                ret_lst.append([chrom, st, end])
    return ret_lst
def getExonFromFile2(bedfile):
    '''Extract ALL exon regions from input bed file (must be 12-column).

    Returns a defaultdict mapping "chrom:txStart-txEnd:strand:geneName" to
    the set of (exon_start, exon_end) tuples of that transcript.

    NOTE(review): the original body was broken (undefined "txstart"/"txEnd",
    int/str concatenation in the key, list.append called with two args);
    this reconstruction follows the documented intent and the sibling
    getExonFromFile helper -- confirm against upstream rseqc if possible.
    '''
    ret_dict_full = collections.defaultdict(set)
    with open(bedfile, 'r') as fh:
        for line in fh:
            if line.startswith(('#', 'track', 'browser')):
                continue
            try:
                fields = line.rstrip('\r\n').split()
                chrom = fields[0]
                txStart = int(fields[1])
                txEnd = int(fields[2])
                geneName = fields[3]
                strand = fields[5]
                exon_start = [txStart + int(i) for i in fields[11].rstrip(',').split(',')]
                exon_end = [st + int(size)
                            for st, size in zip(exon_start, fields[10].rstrip(',').split(','))]
                key = chrom + ':' + str(txStart) + '-' + str(txEnd) + ':' + strand + ':' + geneName
            except Exception:
                sys.stderr.write("[NOTE:input bed must be 12-column] skipped this line: " + line)
                continue
            ret_dict_full[key] = set(zip(exon_start, exon_end))
    return ret_dict_full
def getUTRExonFromLine(bedline, utr=35):
    '''Extract UTR regions from a single 12-column bed line.

    When utr=35 [default], extract both 5' and 3' UTR.  When utr=3, only
    extract 3' UTR.  When utr=5, only extract 5' UTR.  Returns a list of
    [chrom:strand, start, end], or None for header/empty lines.

    Fixes vs. original: "chromm" typo (NameError at runtime) and chained
    lazy map() objects that break under Python 3.
    '''
    line = bedline
    # Header / empty lines carry no exon information.
    if line.startswith(('#', 'track', 'browser')):
        return None
    if not line.strip():
        return None
    fields = line.rstrip('\r\n').split()
    chrom = fields[0] + ':' + fields[5]    # was "chromm": NameError in original
    txStart = int(fields[1])
    cdsStart = int(fields[6])
    cdsEnd = int(fields[7])
    exon_start = [txStart + int(i) for i in fields[11].rstrip(',').split(',')]
    exon_end = [st + int(size)
                for st, size in zip(exon_start, fields[10].rstrip(',').split(','))]
    ret_lst = []
    if utr in (35, 5):
        for st, end in zip(exon_start, exon_end):
            if st < cdsStart:
                ret_lst.append([chrom, st, min(end, cdsStart)])
    if utr in (35, 3):
        for st, end in zip(exon_start, exon_end):
            if end > cdsEnd:
                ret_lst.append([chrom, max(st, cdsEnd), end])
    return ret_lst
def getCDSExonFromLine(bedline):
    '''Extract CDS exon regions from a single 12-column bed line.

    Returns a list of [chrom:strand, cds_exon_start, cds_exon_end], or None
    for header/empty lines.

    Fixes vs. original: "chromm" typo (NameError at runtime); unused locals
    (name, blockCount, cds_exons, ...) removed.
    '''
    line = bedline
    if line.startswith(('#', 'track', 'browser')):
        return None
    if not line.strip():
        return None
    f = line.strip().split()
    chrom = f[0] + ':' + f[5]              # was "chromm": NameError in original
    chrom_start = int(f[1])
    cdsStart = int(f[6])
    cdsEnd = int(f[7])
    blockSizes = [int(i) for i in f[10].strip(',').split(',')]
    blockStarts = [chrom_start + int(i) for i in f[11].strip(',').split(',')]
    ret_lst = []
    # Clip each block to [cdsStart, cdsEnd]; blocks entirely outside are dropped.
    for base, offset in zip(blockStarts, blockSizes):
        if (base + offset) < cdsStart:
            continue
        if base > cdsEnd:
            continue
        ret_lst.append([chrom, max(base, cdsStart), min(base + offset, cdsEnd)])
    return ret_lst
def getExonFromLine(bedline):
    '''Extract ALL exon boundaries from a single 12-column bed line.

    Returns a defaultdict(set) mapping "chrom:strand" to the set of every
    exon start and end coordinate on the line.

    Fix vs. original: list comprehensions replace chained lazy map()
    objects, which under Python 3 consumed each other mid-iteration and
    produced wrong results (identical behavior under Python 2).
    '''
    ret_lst = collections.defaultdict(set)
    fields = bedline.rstrip('\r\n').split()
    txStart = int(fields[1])
    chrom = fields[0] + ':' + fields[5]
    exon_start = [txStart + int(i) for i in fields[11].rstrip(',').split(',')]
    exon_end = [st + int(size)
                for st, size in zip(exon_start, fields[10].rstrip(',').split(','))]
    for st, end in zip(exon_start, exon_end):
        ret_lst[chrom].add(st)
        ret_lst[chrom].add(end)
    return ret_lst
def annotateBed(inputbed,refbed,outfile):
'''compare inputbed to refbed'''
ref_exon_ranges = {}
ref_exon_starts = collections.defaultdict(set) #key='chrom:+', value=set()
ref_exon_ends = collections.defaultdict(set)
OF = open(outfile,'w')
#read reference bed file
#Extract CDS exons from reference bed
tmp = getCDSExonFromFile(refbed)
for i in tmp: #chr:+, st, end (0-base)
ref_exon_starts[i[0]].add(int(i[1]))
ref_exon_ends[i[0]].add(int(i[2]))
if i[0] not in ref_exon_ranges:
ref_exon_ranges[i[0]] = Intersecter()
ref_exon_ranges[i[0]].add_interval( Interval( int(i[1]), int(i[2]) ))
#Extract UTR exons from reference bed
tmp = getUTRExonFromFile(refbed)
for i in tmp: #chr:+, st, end (0-base)
ref_exon_starts[i[0]].add(int(i[1]))
ref_exon_ends[i[0]].add(int(i[2]))
if i[0] not in ref_exon_ranges:
ref_exon_ranges[i |
dokipen/trac | trac/resource.py | Python | bsd-3-clause | 15,270 | 0.001506 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006-2007 Alec Thomas <alec@swapoff.org>
# Copyright (C) 2007 Christian Boos <cboos@neuf.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <cboos@neuf.fr>
# Alec Thomas <alec@swapoff.org>
from trac.core import *
from trac.util.translation import _
class ResourceNotFound(TracError):
    """Thrown when a non-existent resource is requested.

    Plain subclass of `TracError` with no added behavior; it exists so
    callers can catch missing-resource conditions specifically.
    """
class IResourceManager(Interface):
    """Extension point interface for components that manage resources
    belonging to one or more realms (see `get_resource_realms`).
    """

    def get_resource_realms():
        """Return resource realms managed by the component.

        :rtype: `basestring` generator
        """

    def get_resource_url(resource, href, **kwargs):
        """Return the canonical URL for displaying the given resource.

        :param resource: a `Resource`
        :param href: an `Href` used for creating the URL

        Note that if there's no special rule associated to this realm for
        creating URLs (i.e. the standard convention of using realm/id applies),
        then it's OK to not define this method.
        """

    def get_resource_description(resource, format='default', context=None,
                                 **kwargs):
        """Return a string representation of the resource, according to the
        `format`.

        :param resource: the `Resource` to describe
        :param format: the kind of description wanted. Typical formats are:
           `'default'`, `'compact'` or `'summary'`.
        :param context: an optional rendering context to allow rendering rich
           output (like markup containing links)
        :type context: `Context`

        Additional keyword arguments can be given as extra information for
        some formats.

        For example, the ticket with the id 123 is represented as:
         - `'#123'` in `'compact'` format,
         - `'Ticket #123'` for the `default` format.
         - `'Ticket #123 (closed defect): This is the summary'` for the
           `'summary'` format

        Note that it is also OK to not define this method if there's no
        special way to represent the resource, in which case the standard
        representations 'realm:id' (in compact mode) or 'Realm id' (in
        default mode) will be used.
        """
class Resource(object):
    """Resource identifier.

    This specifies as precisely as possible *which* resource from a Trac
    environment is manipulated.

    A resource is identified by:
     (- a `project` identifier) 0.12?
     - a `realm` (a string like `'wiki'` or `'ticket'`)
     - an `id`, which uniquely identifies a resource within its realm.
       If the `id` information is not set, then the resource represents
       the realm as a whole.
     - an optional `version` information.
       If `version` is `None`, this refers by convention to the latest
       version of the resource.

    Some generic and commonly used rendering methods are associated as well
    to the Resource object. Those properties and methods actually delegate
    the real work to the Resource's manager.
    """

    __slots__ = ('realm', 'id', 'version', 'parent')

    def __repr__(self):
        # Render the full parent chain as "realm:id@version" segments,
        # outermost ancestor first.  (py2-era code: `unicode` is intentional.)
        path = []
        r = self
        while r:
            name = r.realm
            if r.id:
                name += ':' + unicode(r.id) # id can be numerical
            if r.version is not None:
                name += '@' + unicode(r.version)
            path.append(name or '')
            r = r.parent
        return '<Resource %r>' % (', '.join(reversed(path)))

    def __eq__(self, other):
        return self.realm == other.realm and \
               self.id == other.id and \
               self.version == other.version and \
               self.parent == other.parent

    def __hash__(self):
        """Hash this resource descriptor, including its hierarchy."""
        path = ()
        current = self
        while current:
            # Bug fix: accumulate each ancestor's own triple.  The previous
            # code appended (self.realm, self.id, self.version) on every
            # iteration, so two resources differing only in their parent
            # chain always hashed to the same value.
            path += (current.realm, current.id, current.version)
            current = current.parent
        return hash(path)

    # -- methods for creating other Resource identifiers

    def __new__(cls, resource_or_realm=None, id=False, version=False,
                parent=False):
        """Create a new Resource object from a specification.

        :param resource_or_realm: this can be either:
           - a `Resource`, which is then used as a base for making a copy
           - a `basestring`, used to specify a `realm`
        :param id: the resource identifier
        :param version: the version or `None` for indicating the latest version

        >>> main = Resource('wiki', 'WikiStart')
        >>> repr(main)
        "<Resource u'wiki:WikiStart'>"

        >>> Resource(main) is main
        True

        >>> main3 = Resource(main, version=3)
        >>> repr(main3)
        "<Resource u'wiki:WikiStart@3'>"

        >>> main0 = main3(version=0)
        >>> repr(main0)
        "<Resource u'wiki:WikiStart@0'>"

        In a copy, if `id` is overriden, then the original `version` value
        will not be reused.

        >>> repr(Resource(main3, id="WikiEnd"))
        "<Resource u'wiki:WikiEnd'>"

        >>> repr(Resource(None))
        "<Resource ''>"
        """
        realm = resource_or_realm
        if isinstance(resource_or_realm, Resource):
            if id is False and version is False and parent is False:
                return resource_or_realm
            else: # copy and override
                realm = resource_or_realm.realm
            if id is False:
                id = resource_or_realm.id
            if version is False:
                if id == resource_or_realm.id:
                    version = resource_or_realm.version # could be 0...
                else:
                    version = None
            if parent is False:
                parent = resource_or_realm.parent
        else:
            if id is False:
                id = None
            if version is False:
                version = None
            if parent is False:
                parent = None
        resource = super(Resource, cls).__new__(cls)
        resource.realm = realm
        resource.id = id
        resource.version = version
        resource.parent = parent
        return resource

    def __call__(self, realm=False, id=False, version=False, parent=False):
        """Create a new Resource using the current resource as a template.

        Optional keyword arguments can be given to override `id` and
        `version`.
        """
        return Resource(realm is False and self or realm, id, version, parent)

    # -- methods for retrieving children Resource identifiers

    def child(self, realm, id=False, version=False):
        """Retrieve a child resource for a secondary `realm`.

        Same as `__call__`, except that this one sets the parent to `self`.

        >>> repr(Resource(None).child('attachment', 'file.txt'))
        "<Resource u', attachment:file.txt'>"
        """
        return Resource(realm, id, version, self)
class ResourceSystem(Component):
"""Resource identification and description manager.
This component makes the link between `Resource` identifiers and their
corresponding manager `Component`.
"""
resource_managers = ExtensionPoint(IResourceManager)
def __init__(self):
self._resource_managers_map = None
# Public methods
def get_resource_manager(self, realm):
"""Return the component responsible for resources in the given `realm`
:param realm: the realm name
:return: a `Component` implementing `IResourceManager` or `None`
"""
# build a dict of realm keys to IResourceManager implementations
if not self._resource_managers_map:
map = {}
for manager in self |
Byron/bcore | src/python/bcontext/tests/__init__.py | Python | lgpl-3.0 | 243 | 0.00823 | #-*-coding:utf-8-*-
"""
@package bcontext.tests
@brief tests for bcontext
@author Sebastian Thiel
@copyright [G | NU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
from __future__ import unico | de_literals
__all__ = []
|
yolanother/script.pseudotv.live | pseudotv.py | Python | gpl-3.0 | 1,802 | 0.007769 | # Copyright (C) 2011 Jason Anderson
#
#
# This file is part of PseudoTV.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import sys
import os, threading
import xbmc, xbmcgui
import xbmcaddon
from resources.lib.Globals import *
from resources.lib.ga import *
# Script constants
__scriptname__ = "PseudoTV Live"
__author__ = "Lunatixz, Orginally Jason102 & Angrycamel"
__url__ = "https://github.com/Lunatixz/script.pseudotv.live"
__settings__ = xbmcaddon.Addon(id='script.pseudotv.live')
__cwd__ = __settings__.getAddonInfo('path')
__version__ = VERSION
__language__ = __settings__.getLocalizedString
__cwd__ = __settings__.getAddonInfo('path')
import resources.lib.Overlay as Overlay
MyOverlayWindow = Overla | y.TVOverlay("script.pseudotv.live.TVOverlay.xml", __cwd__, Skin_Select)
for curthread in threading.enumerate():
try:
log("Active Thread: " + str(curthread.name), xbmc.LOGERROR)
|
if curthread.name != "MainThread":
try:
curthread.join()
except:
pass
log("Joined " + curthread.name)
except:
pass
del MyOverlayWindow
xbmcgui.Window(10000).setProperty("PseudoTVRunning", "False")
|
Pipe-s/dynamic_machine | dynamic_machine/cli_process_json_test.py | Python | mit | 4,232 | 0.008979 | '''
Created on Jun 19, 2014
@author: lwoydziak
'''
from dynamic_machine.cli_process_json import CliProcessingJson
from mockito.mocking import mock
from mockito.matchers import any
from mockito.mockito import when, verifyNoMoreInteractions, verify
from _pytest.runner import fail
from dynamic_machine.cli_commands import Command
from json import dumps, loads
def test_constructionWithInitialJson():
    """Constructing with an inline JSON string must not raise."""
    initial_json = '{"none":"none"}'
    CliProcessingJson("garbage", initial_json)
def test_successfullLoadFromFile():
jsonObject = mock()
cliProcessingJson = CliProcessingJson("garbage", jsonObject=jsonObject)
when(jsonObject).load(any()).thenReturn({})
cliProcessingJs | on.loadJsonFile()
verify(jsonObject).load(any())
def test_unsuccessfullLoadFromFile():
    # loadJsonFile must swallow loader exceptions rather than propagate them;
    # the stubbed json loader raises unconditionally.
    jsonObject = mock()
    cliProcessingJson = CliProcessingJson("garbage", jsonObject=jsonObject)
    when(jsonObject).load(any()).thenRaise(Exception())
    cliProcessingJson.loadJsonFile()
def test_commandCreatedCorrectly():
    # getCommand should embed both the CLI string and the result-handling
    # option in the produced Command (checked loosely via its __dict__ repr).
    cliProcessingJson = CliProcessingJson("garbage")
    assert 'pwd' in str(cliProcessingJson.getCommand("pwd", {"dontCareAboutResult":None}).__dict__)
    assert 'dontCareAboutResult' in str(cliProcessingJson.getCommand("pwd", {"dontCareAboutResult":None}).__dict__)
def test_commandCreatedCorrectlyWhenAssertingResults():
    # An "assertResultEquals" option must be carried through to the Command.
    cliProcessingJson = CliProcessingJson("garbage")
    assert 'assertResultEquals' in str(cliProcessingJson.getCommand("pwd", {"assertResultEquals":"/home/user"}).__dict__)
def test_commandCreatedCorrectlyWithTimeout():
    # A (result, seconds) tuple should set the expectation timeout (60s here).
    # Round-trip through JSON first, as the production code would receive it
    # (JSON turns the tuple into a list).
    cliProcessingJson = CliProcessingJson("garbage")
    sample = {"assertResultEquals":("result", 60)}
    string = dumps(sample)
    json = loads(string)
    assert "'_seconds': 60" in str(cliProcessingJson.getCommand("pwd", json).expectation._timeout.__dict__)
def test_executingWithNoJsonRaises():
    """execute() must raise when no JSON has been loaded."""
    cliProcessingJson = CliProcessingJson("garbage")
    try:
        cliProcessingJson.execute()
    except Exception:
        pass  # expected: no JSON was loaded
    else:
        # Bug fix: fail() used to sit inside the try block, so the failure
        # exception it raised was swallowed by the bare except and this
        # test could never fail.  The else-clause only runs when execute()
        # did NOT raise.
        fail("execute() did not raise even though no JSON was loaded")
def setupCliProcessingJson(json, password=None):
    # Shared fixture: build a CliProcessingJson plus stubbed user/CLI
    # factories so execute() can be driven without a real SSH connection.
    # Returns (processor, user factory, cli factory, cli stub, user stub).
    cliProcessingJson = CliProcessingJson("garbage", initialJsonString=json)
    UserObject = mock()
    user = mock()
    user.password = password
    CliObject = mock()
    cli = mock()
    when(UserObject).called(any(), any()).thenReturn(user)
    when(CliObject).called(any(), any(), debug=any(), trace=any()).thenReturn(cli)
    return cliProcessingJson, UserObject, CliObject, cli, user
def test_excutingWithNoCommandsJustLogsIn():
    # Without a "commands" key execute() should still connect and disconnect,
    # but never touch the user object (no command execution, null password).
    json = '{\
    "hostname" : "<ip>",\
    "username" : "root",\
    "password" : null\
    }'
    cliProcessingJson, UserObject, CliObject, cli, user = setupCliProcessingJson(json)
    cliProcessingJson.execute(UserObject.called, CliObject.called)
    verify(cli).connectWithSsh()
    verify(cli).closeCliConnectionTo()
    verifyNoMoreInteractions(user)
def test_excutingWithPasswordUsesPassword():
    # A non-null password in the JSON should force an SSH password login.
    json = '{\
    "hostname" : "<ip>",\
    "username" : "root",\
    "password" : "password"\
    }'
    cliProcessingJson, UserObject, CliObject, cli, _ = setupCliProcessingJson(json, "password")
    cliProcessingJson.execute(UserObject.called, CliObject.called)
    verify(cli, atleast=1).loginSsh()
def test_canExecuteCommand():
    # Each entry in the "commands" list must result in exactly one
    # user.execute(...) call (three commands -> times=3).
    json = '{\
    "hostname" : "<ip>",\
    "username" : "root",\
    "password" : null,\
    "commands" : [ \
    {"ls" : { "dontCareAboutResult" : null}},\
    {"pwd" : { "assertResultEquals" : "/home/user"}},\
    {"cd /" : { "assertResultNotEquals" : "Permission Denied"}}\
    ]\
    }'
    cliProcessingJson, UserObject, CliObject, _, user = setupCliProcessingJson(json, "password")
    cliProcessingJson.execute(UserObject.called, CliObject.called)
    verify(user, times=3).execute(any())
def test_raiseWhenSshConnectionFails():
    """execute() must propagate a failure raised during SSH login."""
    json = '{\
    "hostname" : "<ip>",\
    "username" : "root",\
    "password" : "password"\
    }'
    cliProcessingJson, UserObject, CliObject, cli, _ = setupCliProcessingJson(json, "password")
    when(cli).loginSsh().thenRaise(Exception("Any"))
    try:
        cliProcessingJson.execute(UserObject.called, CliObject.called)
    except Exception:
        pass  # expected: the stubbed loginSsh raised
    else:
        # Bug fix: fail() used to sit inside the try block, so the failure
        # exception it raised was swallowed by the bare except and this
        # test could never fail.
        fail("execute() swallowed the SSH login failure")
vitay/ANNarchy | ANNarchy/generator/Projection/OpenMP/__init__.py | Python | gpl-2.0 | 1,202 | 0.002496 | """
The OpenMP package does contain all code templates required for the openMP |
code generation in ANNarchy.
BaseTemplates:
defines the basic defintions common to all sparse matrix formates, e. g. projection header
[FORMAT]_SingleThread:
defines the format specific defintions for the currently available formats:
* LIL: list-in-list
* COO: coordinate
* CSR: compressed sparse row
* ELL: ELLPACK/ITPACK
* ELL-R: ELLPACK format with row-leng | th array
* Dense: a full matrix representation
there are some special purpose implementations:
* CSR_T: compressed sparse row (transposed)
* LIL_P: a partitioned LIL representation
"""
from . import LIL as LIL_OpenMP
from . import LIL_P as LIL_Sliced_OpenMP
from . import COO as COO_OpenMP
from . import CSR as CSR_OpenMP
from . import CSR_T as CSR_T_OpenMP
from . import CSR_T_P as CSR_T_Sliced_OpenMP
from . import ELL as ELL_OpenMP
from . import ELLR as ELLR_OpenMP
from . import Dense as Dense_OpenMP
__all__ = ["BaseTemplates", "LIL_OpenMP", "LIL_Sliced_OpenMP", "COO_OpenMP", "CSR_OpenMP", "CSR_T_OpenMP", "CSR_T_Sliced_OpenMP", "ELL_OpenMP", "ELLR_OpenMP", "Dense_OpenMP"] |
atakan/Fractal-Trails | trail_maker.py | Python | gpl-3.0 | 6,471 | 0.014219 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Mehmet Atakan Gürkan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (probably in a file named COPYING).
# If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import numpy as np
from numpy.random import random, seed
import argparse, sys
parser = argparse.ArgumentParser(description='Creates a random walk trail')
parser.add_argument('-d',
type=int, default=3,
help='topological dimension of the trail (default: 3)')
parser.add_argument('-N',
type=int, default=1000,
help='number of steps in the trail (default: 1000)')
parser.add_argument('-i',
type=float, default=1.0,
help='size of increments (will get normalized by 1/sqrt(N))')
parser.add_argument('-r',
type=int, default=0,
help='repository size (default: 0)')
parser.add_argument('-c',
type=float, default=0.0,
help='bias strength')
parser.add_argument('-b',
type=float, default=3.0e40,
help='radius of boundary')
parser.add_argument('-s',
type=int, default=42,
help='random number seed (default: 42)')
parser.add_argument('--rangen',
type=int, default=-1,
help='generate this many random numbers before starting to build the trail (default: repo size; ignored if less than repo size)')
parser.add_argument('-P0',
type=float, default=0.0, dest='P0',
help='initial value (for 1D)')
parser.add_argument('-P0x',
type=float, default=0.0, dest='P0x',
| help='initial value of x component (for 3D)')
parser.add_argument('-P0y',
type=float, default=0.0, dest='P0y',
help='initial value of y component (for 3D)')
parser.add_argument('-P0z',
type=float, default=0.0, dest='P0z',
help='initial value of z component (for 3D)')
parser.add_argument('-o','--output-file',
| dest='outfile',
type=argparse.FileType('w'),
default=sys.stdout,
help='output filename (if not given, use stdout)')
parser.add_argument('--numpy', dest='outputformat', action='store_const',
const='numpy', default='undecided',
help='output in NumPy format (default: ASCII for stdout, NumPy for file')
parser.add_argument('--ascii', dest='outputformat', action='store_const',
const='ascii', default='undecided',
help='output in ASCII format (default: ASCII for stdout, NumPy for file')
args = parser.parse_args()
seed(args.s)
if args.rangen > args.r :
dummy = random(args.rangen - args.r)
N = args.N
d = args.d
b = args.b
dP = args.i/np.sqrt(N)
def trail_1d(N, r=0, c=0.0, b=3.0e40) :
    """Build a 1-D random-walk trail of N steps.
    Reads the module-level ``args.P0`` (start value) and ``dP`` (step size).
    When ``r > 0`` and ``c != 0`` a ring buffer ("repository") of the last r
    random draws biases the direction threshold ``q`` with strength ``c``.
    ``b`` acts as a reflecting boundary radius.  Returns an array of N+1
    positions (including the start point).
    """
    P0 = args.P0
    P = P0
    trl = np.empty(N+1)
    trl[0] = P0
    if r>0 and c!=0.0 : # repository initialization
        use_rep = True
        rep_norm = 1.0/np.sqrt(r)
        rep = random(r)-0.5
        q = np.sum(rep)*rep_norm * c  # initial bias threshold
    else :
        use_rep = False
        dummy = random(r)  # keep the RNG stream aligned with the biased case
        q = 0.0
    for i in range(N) :
        X = random() - 0.5
        # step direction decided against the (possibly biased) threshold q
        if X>q : DP = -dP
        else : DP = dP
        if np.fabs(P+DP) > b :
            P -= DP  # would cross the boundary: take the opposite step
        else :
            P += DP
        trl[i+1] = P
        if use_rep :
            rep[i%r] = X  # overwrite oldest repository entry (ring buffer)
            q = -1.0*np.sum(rep)*rep_norm * c
    return trl
def trail_3d(N, r=0, c=0.0, b=3.0e40) :
    """Build a 3-D random-walk trail of N steps.
    Same scheme as trail_1d, applied independently per component.  Reads the
    module-level ``args.P0x/P0y/P0z`` start point and ``dP`` step size; the
    boundary test uses the Euclidean norm against radius ``b``.  Returns an
    (N+1, 3) array of positions.
    """
    def vec_norm2(a) :
        # squared Euclidean norm; avoids a sqrt per step
        return a[0]*a[0] + a[1]*a[1] + a[2]*a[2]
    b2 = b*b
    P0x, P0y, P0z = args.P0x, args.P0y, args.P0z
    Px, Py, Pz = P0x, P0y, P0z
    trl = np.empty((N+1,3))
    trl[0] = P0x, P0y, P0z
    if r>0 and c!=0.0 : # repository initialization
        use_rep = True
        rep_norm = 1.0/np.sqrt(r)
        repx, repy, repz = random(r)-0.5, random(r)-0.5, random(r)-0.5
        qx = np.sum(repx)*rep_norm * c
        qy = np.sum(repy)*rep_norm * c
        qz = np.sum(repz)*rep_norm * c
    else :
        use_rep = False
        dummy = random(r*3)  # keep the RNG stream aligned with the biased case
        qx, qy, qz = 0.0, 0.0, 0.0
    for i in range(N) :
        Xx = random() - 0.5
        Xy = random() - 0.5
        Xz = random() - 0.5
        # per-component step direction against the bias thresholds
        if Xx>qx : DPx = -dP
        else : DPx = dP
        if Xy>qy : DPy = -dP
        else : DPy = dP
        if Xz>qz : DPz = -dP
        else : DPz = dP
        Ptry = Px+DPx, Py+DPy, Pz+DPz
        if vec_norm2(Ptry) > b2 : # we'll cross bndry if we take this step
            Px -= DPx # so we take the opposite step
            Py -= DPy
            Pz -= DPz
        else : # we are safe
            Px += DPx # so we take normal step
            Py += DPy
            Pz += DPz
        trl[i+1] = (Px, Py, Pz)
        if use_rep :
            # update the ring buffers and recompute the bias thresholds
            repx[i%r], repy[i%r], repz[i%r]= Xx, Xy, Xz
            qx = -1.0*np.sum(repx)*rep_norm * c
            qy = -1.0*np.sum(repy)*rep_norm * c
            qz = -1.0*np.sum(repz)*rep_norm * c
    return trl
# Decide the on-disk format: writing to a terminal defaults to ASCII,
# writing to a file defaults to NumPy's binary .npy format.
if args.outputformat == 'undecided' :
    if args.outfile == sys.stdout :
        outputformat = 'ascii'
    else :
        outputformat = 'numpy'
else :
    outputformat = args.outputformat
# Build the trail for the requested dimension and write it out.
if d==1 :
    trl = trail_1d(N, r=args.r, c=args.c, b=args.b)
    if outputformat == 'ascii' :
        for p in trl :
            print('%e' % (p), file=args.outfile)
    else :
        np.save(args.outfile, trl)
elif d==3 :
    trl = trail_3d(N, r=args.r, c=args.c, b=args.b)
    if outputformat == 'ascii' :
        for p in trl :
            print('%e %e %e' % (p[0], p[1], p[2]), file=args.outfile)
    else :
        np.save(args.outfile, trl)
else :
    # only 1-D and 3-D are implemented
    print('illegal dimension given: %d' %(d))
|
zugaldia/capitalbikeshare | appengine/app/shared/models/user_model.py | Python | apache-2.0 | 2,057 | 0 | from appython.components.api.common_fields import IsSetField
from appython.components.models.base_model import BaseModel
from appython.components.user.base_manager import BaseManager
from flask.ext.restful import fields, marshal
from google.appengine.ext import ndb
class UserModel(BaseModel):
    """Datastore model for an application user.

    Every user carries an email address; ``password``/``api_key`` are set
    only for the corresponding auth flows.  Also implements the methods
    Flask-Login expects on a user object.
    """
    # Every user must have an email address
    email = ndb.StringProperty(required=True)
    # For email/password users
    password = ndb.StringProperty()
    # Used to authenticate API requests
    api_key = ndb.StringProperty()
    # Flags
    is_admin = ndb.BooleanProperty(default=False)
    '''
    API
    '''
    def to_api(self):
        # Serialize for API responses; secrets (password/api_key) are only
        # exposed as set/unset flags via IsSetField, never as raw values.
        custom_fields = {
            'email': fields.String,
            'password': IsSetField,
            'api_key': IsSetField,
            'is_admin': fields.Boolean}
        basic_fields = self.get_basic_fields()
        basic_fields.update(custom_fields)
        return marshal(self, basic_fields)
    '''
    Manual creation
    '''
    @classmethod
    def create_from_email(cls, email, password):
        # Normalize the address and store only the password hash.
        email_ready = BaseManager.prepare_email(email=email)
        user_model = cls(email=email_ready)
        user_model.password = BaseManager.get_password_hash(password=password)
        return user_model
    '''
    Getters
    '''
    @classmethod
    def get_by_email(cls, email):
        email_ready = BaseManager.prepare_email(email=email)
        return cls.query(cls.email == email_ready).get()
    @classmethod
    def get_by_api_key(cls, api_key):
        return cls.query(cls.api_key == api_key).get()
    '''
    Required by Flask-Login. Note that @property doesn't work here because
    it only works with new-style classes.
    '''
    def is_active(self):
        # ``deleted`` is presumably a BaseModel soft-delete flag -- verify.
        return False if self.deleted else True
    def is_authenticated(self):
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        # Overrides the default implementation. Flask-Login requires this
        # to be an unicode always.
        return unicode(self.key.urlsafe())
|
ballouche/navitia | source/jormungandr/jormungandr/autocomplete/abstract_autocomplete.py | Python | agpl-3.0 | 1,893 | 0.000528 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from abc import abstractmethod, ABCMeta
class AbstractAutocomplete(object):
    """
    Abstract base class for autocomplete backends.

    Concrete implementations answer autocomplete queries and report the
    status of their geographic data for a given coverage instance.
    """
    # Python 2 style metaclass declaration (file imports unicode_literals etc.)
    __metaclass__ = ABCMeta

    @abstractmethod
    def get(self, query, instance):
        """Run the autocomplete ``query`` against ``instance``'s backend."""
        pass

    @abstractmethod
    def geo_status(self, instance):
        """Return a GeoStatusResponse describing ``instance``'s geo data."""
        pass
class GeoStatusResponse(object):
    """Mutable container for the geographic-data status of an instance.

    All counters start as None (unknown); the source lists start empty.
    """
    def __init__(self):
        self.street_network_sources = []
        self.poi_sources = []
        self.nb_admins = None
        self.nb_admins_from_cities = None
        self.nb_ways = None
        self.nb_addresses = None
        self.nb_pois = None
|
matslindh/codingchallenges | adventofcode2016/25.py | Python | mit | 3,632 | 0.005507 | instr = [x.strip().split(' ') for x in open("input/dec25").readlines()]
# Program-patching scaffolding: ``skip`` marks instruction indices the
# interpreter should ignore, ``modified`` marks hand-optimized instructions.
# The commented-out lines below are earlier manual optimizations (replacing
# slow add/copy loops with 'add'/'mul' pseudo-instructions) kept for reference.
skip = {}
modified = {}
#instr[1] = ['add', 'a', '2572']
#skip[2] = skip[3] = skip[4] = skip[5] = skip[6] = skip[7] = skip[8] = skip[9] = True
#instr[6] = ['add', 'a', 'c'] # adds c to d, sets c to 0
#skip[7] = True
#skip[8] = True
#modified[6] = modified[7] = modified[8] = True
#instr[9] = ['mul', 'a', 'd'] # multiplies a with d
#skip[10] = True
#modified[9] = modified[10] = True
"""instr[10] = ['add', 'a', 'b'] # adds b to a, sets b to 0
skip[11] = True
skip[12] = True"""
#instr[14] = ['mul', 'a', 'd'] # multiplies a with d
#skip[15] = True
def print_program(inss):
    """Pretty-print the instruction list, marking skipped indices with '#'."""
    for idx, instruction in enumerate(inss):
        marker = ' # ' if idx in skip else '   '
        print(marker, idx, instruction)
print_program(instr)
# evaluated a couple of numbers, found that it found the binary representation of a number, found
# first number above 2572 (which instr 1 - 9 adds to the number) that repeats itself (ends with 0 and is 101010 etc.)
# and subtracted 2572
for x in [158]:
    pc = 0
    reg = {'a': x, 'b': 0, 'c': 0, 'd': 0}
    output = ''
    while pc < len(instr):
        if pc in skip:
            pc += 1
            continue
        inst = instr[pc]
        if inst[0] == 'add':
            # pseudo-instruction (manual optimization): reg[1] += value,
            # and the source register is cleared, mimicking the add loop.
            v = reg[inst[2]] if inst[2] in reg else int(inst[2])
            reg[inst[1]] += v
            reg[inst[2]] = 0
            pc += 1
        elif inst[0] == 'mul':
            # pseudo-instruction: reg[1] *= reg[2], clearing reg[2]
            reg[inst[1]] *= reg[inst[2]]
            reg[inst[2]] = 0
            pc += 1
        elif inst[0] == 'cpy':
            # cpy into a literal target (after a tgl) is silently ignored
            if inst[2] in reg:
                if inst[1] in reg:
                    reg[inst[2]] = reg[inst[1]]
                else:
                    reg[inst[2]] = int(inst[1])
            pc += 1
        elif inst[0] == 'inc':
            reg[inst[1]] += 1
            pc += 1
        elif inst[0] == 'dec':
            reg[inst[1]] -= 1
            pc += 1
        elif inst[0] == 'jnz':
            # jump by offset when the (register or literal) test value != 0
            if (inst[1] in reg and reg[inst[1]] != 0) or (inst[1] not in reg and int(inst[1]) != 0):
                if inst[2] in reg:
                    pc += reg[inst[2]]
                else:
                    pc += int(inst[2])
            else:
                pc += 1
        elif inst[0] == 'tgl':
            # toggle the instruction at pc + reg[arg] (AoC day 23 semantics)
            if inst[1] in reg:
                d = pc + reg[inst[1]]
                # valid
                if d < len(instr) and d >= 0:
                    if d in modified:
                        print("modified instruction tggled")
                    if len(instr[d]) == 2:
                        if instr[d][0] == 'inc':
                            instr[d][0] = 'dec'
                        else:
                            instr[d][0] = 'inc'
                    elif len(instr[d]) == 3:
                        if instr[d][0] == 'jnz':
                            instr[d][0] = 'cpy'
                        else:
                            instr[d][0] = 'jnz'
            else:
                print("  invalid register", inst[1])
            pc += 1
        elif inst[0] == 'out':
            # append the emitted value to the clock-signal trace
            v = reg[inst[1]] if inst[1] in reg else inst[1]
            output += str(v)
            print(output)
            #if len(output) > 1 and output != '01':
            #    break
            #elif len(output) > 1:
            #    print("THIS IS IT", x)
            pc += 1
        else:
            print("INVALID INSTRUCTION", inst)
        # debugging probes at interesting program counters
        if pc == 8:
            print(reg)
        if pc == 28:
            print('loop', reg)
        if pc == 29:
            print(x, bin(x), bin(x+2572), output)
            break
print(reg['a'])
power12317/Chrome-Data-Compression-Proxy-Standalone-Python | google.py | Python | gpl-2.0 | 6,470 | 0.007419 | #!/usr/bin/env python
#coding:utf-8
# Author: Beining --<cnbeining#gmail.com>
# Purpose: A Chrome DCP handler.
# Created: 07/15/2015
#Original copyright info:
#Author: Xiaoxia
#Contact: xiaoxia@xiaoxia.org
#Website: xiaoxia.org
import sys
import argparse
from th | reading import Thread, Lock
from struct import unpack
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from httplib import HTTPResponse, HTTPSConnection
from Socket | Server import ThreadingMixIn
import socket, os, select
import time, sys, random
import threading
import select
import socket
import ssl
import socket
import urllib2
# Minimize Memory Usage
threading.stack_size(128*1024)
BufferSize = 8192  # bytes per read when relaying response bodies
RemoteTimeout = 15  # seconds before giving up on the upstream proxy
from hashlib import md5
# NOTE(review): ``global`` at module level is a no-op; PROXY_MODE is simply a
# module-level variable, possibly overwritten in the __main__ block below.
global PROXY_MODE
PROXY_MODE = 'HTTPS'
#----------------------------------------------------------------------
def get_long_int():
    """None->str
    Return a random 9-digit integer as a string; used to fill the numeric
    fields of the Chrome-Proxy request header.
    """
    return str(random.randint(100000000, 999999999))
#----------------------------------------------------------------------
def get_google_header():
    """Build the value of the Chrome-Proxy request header.

    Mirrors
    https://github.com/cnbeining/datacompressionproxy/blob/master/background.js#L10-L18
    (a fork of the original Google Code extension): a 'ps' field made of the
    timestamp plus three random nonces, and an md5-based 'sid' session field.
    """
    auth_value = 'ac4500dd3b7579186c1b0620614fdb1f7d61f944'
    timestamp = str(int(time.time()))
    ps_field = '-'.join([timestamp, get_long_int(), get_long_int(), get_long_int()])
    sid_field = md5((timestamp + auth_value + timestamp).encode('utf-8')).hexdigest()
    return 'ps=' + ps_field + ', sid=' + sid_field + ', b=2403, p=61, c=win'
#----------------------------------------------------------------------
def check_if_ssl():
    """None->str
    Probe Google's connectivity-check URL to decide the proxy scheme,
    as in https://support.google.com/chrome/answer/3517349?hl=en.
    Returns 'HTTPS' when the endpoint answers 200 with "OK" in the body,
    'HTTP' otherwise.
    """
    response = urllib2.urlopen('http://check.googlezip.net/connect')
    try:
        # '==' rather than 'is': identity comparison with ints only works by
        # accident of CPython's small-int caching.
        secure = response.getcode() == 200 and 'OK' in response.read()
    finally:
        response.close()  # don't leak the connection
    if secure:
        print('INFO: Running in HTTPS mode.')
        return 'HTTPS'
    else:
        print('WARNING: Running in HTTP mode, your network admin can see your traffic!')
        return 'HTTP'
class Handler(BaseHTTPRequestHandler):
    """Relays browser requests through Google's data-compression proxy.
    GET/POST/CONNECT are all routed to sogouProxy(), which forwards the
    request (with a freshly signed Chrome-Proxy header) to the upstream
    proxy and streams the response back to the browser.
    """
    # Upstream socket to the Google proxy; created lazily on first use.
    remote = None
    # Ignore Connection Failure
    def handle(self):
        try:
            BaseHTTPRequestHandler.handle(self)
        except socket.error: pass
    def finish(self):
        try:
            BaseHTTPRequestHandler.finish(self)
        except socket.error: pass
    def sogouProxy(self):
        """Forward one browser request upstream and relay the response."""
        # Special-case hostname: serve a PAC file instead of proxying.
        if self.headers["Host"].startswith('chrome_dcp_proxy_pac.cnbeining'): #Give a PAC file
            self.wfile.write("HTTP/1.1 200 OK".encode('ascii') + b'\r\n')
            # NOTE(review): the leading "Host: 127.0.0.1" line inside the PAC
            # payload looks like a leftover response header baked into the
            # body -- verify against a real client before changing.
            hstr = '''Host: 127.0.0.1
function FindProxyForURL(url, host) {
if (url.substring(0,5) == 'http:' &&
!isPlainHostName(host) &&
!shExpMatch(host, '*.local') &&
!isInNet(dnsResolve(host), '10.0.0.0', '255.0.0.0') &&
!isInNet(dnsResolve(host), '172.16.0.0', '255.240.0.0') &&
!isInNet(dnsResolve(host), '192.168.0.0', '255.255.0.0') &&
!isInNet(dnsResolve(host), '127.0.0.0', '255.255.255.0') )
return 'PROXY ''' + server_ip + ':' + str(server_port) + '''; DIRECT';
return 'DIRECT';
}'''
            self.wfile.write(hstr + b'\r\n')
            return
        # (Re)connect upstream when there is no socket yet or the target
        # host changed.
        # NOTE(review): self.lastHost is never assigned in this file; once
        # ``remote`` is set, evaluating it would raise AttributeError (not
        # caught by the socket.error handlers above) -- confirm upstream.
        if self.remote is None or self.lastHost != self.headers["Host"]:
            if PROXY_MODE == 'HTTPS':
                # TLS to proxy.googlezip.net with certificate verification.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_REQUIRED
                context.check_hostname = True
                context.load_default_certs()
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(RemoteTimeout)
                self.remote = context.wrap_socket(s, server_hostname='proxy.googlezip.net')
                self.remote.connect(('proxy.googlezip.net', 443))
            else: #HTTP
                self.remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.remote.settimeout(RemoteTimeout)
                self.remote.connect(("compress.googlezip.net", 80))
        # Forward the request line and headers verbatim (CRLF-normalized).
        self.remote.sendall(self.requestline.encode('ascii') + b"\r\n")
        # Add Verification Tags
        self.headers["Chrome-Proxy"] = get_google_header()
        headerstr = str(self.headers).replace("\r\n", "\n").replace("\n", "\r\n")
        self.remote.sendall(headerstr.encode('ascii') + b"\r\n")
        # Send Post data
        if self.command == 'POST':
            self.remote.sendall(self.rfile.read(int(self.headers['Content-Length'])))
        # Parse the upstream reply and relay status + headers to the browser.
        response = HTTPResponse(self.remote, method=self.command)
        response.begin()
        # Reply to the browser
        status = "HTTP/1.1 " + str(response.status) + " " + response.reason
        self.wfile.write(status.encode('ascii') + b'\r\n')
        hlist = []
        for line in response.msg.headers: # Fixed multiple values of a same name
            if 'TRANSFER-ENCODING' not in line.upper():
                hlist.append(line)
        self.wfile.write("".join(hlist) + b'\r\n')
        if self.command == "CONNECT": # NO HTTPS, as Chrome DCP does not allow HTTPS traffic
            return
        else:
            # Stream the body in BufferSize chunks until EOF.
            while True:
                response_data = response.read(BufferSize)
                if not response_data: break
                self.wfile.write(response_data)
    do_POST = do_GET = do_CONNECT = sogouProxy
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    # Handle each request in its own thread; listen on an IPv6 socket
    # (dual-stack on most systems).
    address_family = socket.AF_INET6
if __name__=='__main__':
    # NOTE(review): ``global`` is meaningless at module level; server_ip and
    # server_port are plain module globals (read by Handler.sogouProxy when
    # building the PAC file).
    global server_ip, server_port
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port', default=8080)
    parser.add_argument('-m', '--mode', default= 'HTTPS')
    parser.add_argument('-i', '--ip', default= '')
    args = vars(parser.parse_args())
    # An explicit -m HTTP/HTTPS wins; anything else probes Google's check URL.
    PROXY_MODE = args['mode'].upper() if str(args['mode']).upper() == 'HTTP' or str(args['mode']).upper() == 'HTTPS' else check_if_ssl()
    server_ip, server_port = str(args['ip']), int(args['port'])
    server_address = (server_ip, server_port)
    server = ThreadingHTTPServer(server_address, Handler)
    if not server_ip:
        # binding to all interfaces: use loopback for display/PAC purposes
        server_ip = '127.0.0.1'
    proxy_host = "proxy.googlezip.net:443" if PROXY_MODE == 'HTTPS' else "compress.googlezip.net:80"
    print('Proxy over %s.\nPlease set your browser\'s proxy to %s.' % (proxy_host, server_address))
    print('Or use PAC file: http://chrome_dcp_proxy_pac.cnbeining.com/1.pac')
    try:
        server.serve_forever()
    except:
        # NOTE(review): bare except + os._exit skips all cleanup; consider
        # catching KeyboardInterrupt explicitly instead.
        os._exit(1)
quasiben/bokeh | bokeh/models/formatters.py | Python | bsd-3-clause | 13,993 | 0.001358 | """ Models for controlling the text and visual formatting of tick
labels on Bokeh plot axes.
"""
from __future__ import absolute_import
from .tickers import Ticker
from ..model import Model
from ..core.properties import abstract
from ..core.properties import Bool, Int, String, Enum, Auto, List, Dict, Either, Instance
from ..core.enums import DatetimeUnits, RoundingFunction, NumeralLanguage
@abstract
class TickFormatter(Model):
    """ A base class for all tick formatter types. ``TickFormatter`` is
    not generally useful to instantiate on its own.
    """
    # Concrete subclasses below add the properties that drive formatting.
    pass
class BasicTickFormatter(TickFormatter):
    """ Display tick values from continuous ranges as "basic numbers",
    using scientific notation when appropriate by default.
    """
    precision = Either(Auto, Int, help="""
    How many digits of precision to display in tick labels.
    """)
    # When use_scientific is False, the power limits below are ignored.
    use_scientific = Bool(True, help="""
    Whether to ever display scientific notation. If ``True``, then
    when to use scientific notation is controlled by ``power_limit_low``
    and ``power_limit_high``.
    """)
    power_limit_high = Int(5, help="""
    Limit the use of scientific notation to when::
        log(x) >= power_limit_high
    """)
    power_limit_low = Int(-3, help="""
    Limit the use of scientific notation to when::
        log(x) <= power_limit_low
    """)
class NumeralTickFormatter(TickFormatter):
    """ Tick formatter based on a human-readable format string.
    Format strings follow the numbro.js specification (see the tables in
    the ``format`` help and http://numbrojs.com/format.html).
    """
    format = String("0,0", help="""
    The number format, as defined in the following tables:
    **NUMBERS**:
    ============ ============== ===============
    Number       Format         String
    ============ ============== ===============
    10000        '0,0.0000'     10,000.0000
    10000.23     '0,0'          10,000
    10000.23     '+0,0'         +10,000
    -10000       '0,0.0'        -10,000.0
    10000.1234   '0.000'        10000.123
    10000.1234   '0[.]00000'    10000.12340
    -10000       '(0,0.0000)'   (10,000.0000)
    -0.23        '.00'          -.23
    -0.23        '(.00)'        (.23)
    0.23         '0.00000'      0.23000
    0.23         '0.0[0000]'    0.23
    1230974      '0.0a'         1.2m
    1460         '0 a'          1 k
    -104000      '0a'           -104k
    1            '0o'           1st
    52           '0o'           52nd
    23           '0o'           23rd
    100          '0o'           100th
    ============ ============== ===============
    **CURRENCY**:
    =========== =============== =============
    Number      Format          String
    =========== =============== =============
    1000.234    '$0,0.00'       $1,000.23
    1000.2      '0,0[.]00 $'    1,000.20 $
    1001        '$ 0,0[.]00'    $ 1,001
    -1000.234   '($0,0)'        ($1,000)
    -1000.234   '$0.00'         -$1000.23
    1230974     '($ 0.00 a)'    $ 1.23 m
    =========== =============== =============
    **BYTES**:
    =============== =========== ============
    Number          Format      String
    =============== =========== ============
    100             '0b'        100B
    2048            '0 b'       2 KB
    7884486213      '0.0b'      7.3GB
    3467479682787   '0.000 b'   3.154 TB
    =============== =========== ============
    **PERCENTAGES**:
    ============= ============= ===========
    Number        Format        String
    ============= ============= ===========
    1             '0%'          100%
    0.974878234   '0.000%'      97.488%
    -0.43         '0 %'         -43 %
    0.43          '(0.000 %)'   43.000 %
    ============= ============= ===========
    **TIME**:
    ============ ============== ============
    Number       Format         String
    ============ ============== ============
    25           '00:00:00'     0:00:25
    238          '00:00:00'     0:03:58
    63846        '00:00:00'     17:44:06
    ============ ============== ============
    For the complete specification, see http://numbrojs.com/format.html
    """)
    language = Enum(NumeralLanguage, default="en", help="""
    The language to use for formatting language-specific features (e.g. thousands separator).
    """)
    rounding = Enum(RoundingFunction, help="""
    Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).
    """)
class PrintfTickFormatter(TickFormatter):
    """ Tick formatter based on a printf-style format string. """
    format = String("%s", help="""
    The number format, as defined as follows: the placeholder in the format
    string is marked by % and is followed by one or more of these elements,
    in this order:
    * An optional ``+`` sign
      Causes the result to be preceded with a plus or minus sign on numeric
      values. By default, only the ``-`` sign is used on negative numbers.
    * An optional padding specifier
      Specifies what (if any) character to use for padding. Possible values
      are 0 or any other character preceded by a ``'`` (single quote). The
      default is to pad with spaces.
    * An optional ``-`` sign
      Causes sprintf to left-align the result of this placeholder. The default
      is to right-align the result.
    * An optional number
      Specifies how many characters the result should have. If the value to be
      returned is shorter than this number, the result will be padded.
    * An optional precision modifier
      Consists of a ``.`` (dot) followed by a number, specifies how many digits
      should be displayed for floating point numbers. When used on a string, it
      causes the result to be truncated.
    * A type specifier
      Can be any of:
      - ``%`` --- yields a literal ``%`` character
      - ``b`` --- yields an integer as a binary number
      - ``c`` --- yields an integer as the character with that ASCII value
      - ``d`` or ``i`` --- yields an integer as a signed decimal number
      - ``e`` --- yields a float using scientific notation
      - ``u`` --- yields an integer as an unsigned decimal number
      - ``f`` --- yields a float as is
      - ``o`` --- yields an integer as an octal number
      - ``s`` --- yields a string as is
      - ``x`` --- yields an integer as a hexadecimal number (lower-case)
      - ``X`` --- yields an integer as a hexadecimal number (upper-case)
    """)
class LogTickFormatter(TickFormatter):
    """ Display tick values from continuous ranges as powers
    of some base.
    Most often useful in conjunction with a ``LogTicker``.
    """
    ticker = Instance(Ticker, help="""
    The corresponding ``LogTicker``, used to determine the correct
    base to use. If unset, the formatter will use base 10 as a default.
    """)
class CategoricalTickFormatter(TickFormatter):
    """ Display tick values from categorical ranges as string
    values.
    """
    # No configuration: category factors are rendered as-is.
    pass
def DEFAULT_DATETIME_FORMATS():
    """ Return the default `strftime` format lists per datetime "time scale".

    A function (instead of a lambda assigned to a name, per PEP 8) so every
    caller gets a fresh dict and mutations cannot leak between formatter
    instances.
    """
    return {
        'microseconds': ['%fus'],
        'milliseconds': ['%3Nms', '%S.%3Ns'],
        'seconds': ['%Ss'],
        'minsec': [':%M:%S'],
        'minutes': [':%M', '%Mm'],
        'hourmin': ['%H:%M'],
        'hours': ['%Hh', '%H:%M'],
        'days': ['%m/%d', '%a%d'],
        'months': ['%m/%Y', '%b%y'],
        'years': ['%Y'],
    }
class DatetimeTickFormatter(TickFormatter):
""" Display tick values from a continuous range as formatted
datetimes.
"""
formats = Dict(Enum(DatetimeUnits), List(String), default=DEFAULT_DATETIME_FORMATS, help="""
User defined formats for displaying datetime values.
The enum values correspond roughly to different "time scales". The
corresponding value is a list of `strftime`_ formats to use for
formatting datetime tick values that fall in in that "time scale".
By default, only the first format string passed for each time scale
will be used. By default, all leading zeros are stripped away from
the formatted labels. These behaviors cannot be changed as of now.
An example of specifying the same date format over a range of time scales::
DatetimeTickFormatter(
formats=dict(
hours=["%B %Y"],
days |
buchbend/astrolyze | test/test_astro_functions.py | Python | bsd-3-clause | 526 | 0.003802 | import unittest
import doctest
import os
import astrolyze.functions.astro_functions
class Test(unittest.TestCase):
    """Unit tests for astro_functions."""
    def test_doctests(self):
        """Run astro_functions doctests"""
        doctest.testmod(astrolyze.functions.astro_functions)
        # Convert the EPS figure produced by the doctests, publish it into
        # the documentation tree, then clean up the intermediate file.
        os.system('/usr/bin/convert black_body.eps testfigures/black_body.jpg')
        os.system('cp testfigures/black_body.jpg ../doc/figures/')
        os.system('rm black_body.eps')
if __name__ == "__main__":
unittest.main()
|
ABusers/A-Certain-Magical-API | funimation/api.py | Python | mit | 3,784 | 0.001057 | # -*- coding: utf-8 -*-
from .httpclient import HTTPClient
from .models import Video, Show
__all__ = ['Funimation']
class Funimation(object):
    """Thin client for Funimation's PS3 JSON feeds API."""

    def __init__(self):
        super(Funimation, self).__init__()
        self.http = HTTPClient('http://www.funimation.com/',
                               [('User-Agent', 'Sony-PS3')])
        # defaults to the free account user
        # hmm... the API doesn't appear to validate the users subscription
        # level so if this was changed you might be able to watch
        # the paid videos ;)
        # FunimationSubscriptionUser = paid account
        # FunimationUser = free account
        self.user_type = 'FunimationSubscriptionUser'

    def get_shows(self, limit=3000, offset=0, sort=None, first_letter=None,
                  filter=None):
        """List shows. ``filter`` shadows the builtin but is part of the API."""
        query = self._build_query(locals())
        return self._request('feeds/ps/shows', query)

    def get_videos(self, show_id, limit=3000, offset=0):
        """List videos for a show, rewriting URLs to the mobile variant."""
        query = self._build_query(locals())
        request = self._request('feeds/ps/videos', query)
        for req in request:
            # Replace get params with the mobile one
            # This lets any IP (not only server IP) access content
            req.video_url = req.video_url.split('?')[0]+'?9b303b6c62204a9dcb5ce5f5c607'
            video_split = req.video_url.split(',')
            split_len = len(video_split)
            req.video_url = video_split[0]+video_split[split_len-2]+video_split[split_len-1]
        return request

    def get_featured(self, limit=3000, offset=0):
        query = self._build_query(locals())
        return self._request('feeds/ps/featured', query)

    def search(self, search):
        query = self._build_query(locals())
        return self._request('feeds/ps/search', query)

    def get_latest(self, limit=3000, offset=0):
        # pick the sort option matching the configured subscription level
        if self.user_type == 'FunimationSubscriptionUser':
            sort = 'SortOptionLatestSubscription'
        else:
            sort = 'SortOptionLatestFree'
        return self.get_shows(limit, offset, sort)

    def get_simulcast(self, limit=3000, offset=0):
        return self.get_shows(limit, offset, filter='FilterOptionSimulcast')

    def get_genres(self):
        """Collect the sorted set of all genres across every show."""
        # we have to loop over all the shows to be sure to get all the genres.
        # use a 'set' so duplicates are ignored.
        genres = set()
        for show in self.get_shows():
            if show.get('genres'):
                [genres.add(g) for g in show.get('genres').split(',')]
        return sorted(genres)

    def get_shows_by_genre(self, genre):
        shows = []
        for show in self.get_shows():
            if show.get('genres') and genre in show.get('genres').split(','):
                shows.append(show)
        return shows

    def _request(self, uri, query):
        """Perform a GET and wrap the JSON payload in Video/Show models."""
        res = self.http.get(uri, query)
        if 'videos' in res:
            return [Video(**v) for v in res['videos']]
        elif isinstance(res, list) and 'series_name' in res[0]:
            return [Show(**s) for s in res]
        else:
            # search results
            new_res = {}
            # the result is a list when there is no episodes in the results...
            if isinstance(res['episodes'], list):
                new_res['episodes'] = []
            else:
                new_res['episodes'] = [Video(**v) for v in
                                       res['episodes']['videos']]
            new_res['shows'] = [Show(**s) for s in res['shows']]
            return new_res

    def _build_query(self, params):
        """Normalize a ``locals()`` dict into API query parameters."""
        if params is None:
            params = {}
        else:
            params['first-letter'] = params.pop('first_letter', None)
            params.pop('self', None)
        params.setdefault('ut', self.user_type)
        return params
SivagnanamCiena/nxapi-learning-labs | setup.py | Python | apache-2.0 | 3,329 | 0.001802 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='nxapi-learning-labs',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.0',
    description='Sample usage of NX-API.',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/CiscoDevNet/nxapi-learning-labs',
    # Author details
    author='Ken Jarrad, Cisco DevNet',
    author_email='kjarrad@cisco.com',
    # Choose your license
    license='Apache License, Version 2.0',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Defined Networking :: NX-API',
        # Pick your license as you wish (should match "license" above)
        'License :: Apache 2 License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ],
    # What does your project relate to?
    keywords='Nexus SDN',
    package_dir = {
        '': 'python',
        'nxapi': 'python/nxapi'
    },
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    # packages=find_packages(exclude=['config', 'test']),
    packages=['nxapi'],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        "requests",
        "ipaddress",
        "tabulate",
        "future",
    ],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            # 'show_version=show_version:main',
        ],
    },
    scripts=[
        # 'python/example/show_version.py',
    ]
)
|
adamcaudill/yawast | tests/test_print_header.py | Python | mit | 640 | 0.001563 | # Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
from unittest import TestCase
from tests import utils
from yawast import main
from yawast._version import get_version
from yawast.shared import output
class TestPrintHeader(TestCase):
    """Check that the startup banner includes the YAWAST version string."""
    def test_print_header(self):
        output.setup(False, True, True)
        with utils.capture_sys_output() as (stdout, stderr):
            main.print_header()
        self.assertIn("(v%s)" % get_version(), stdout.getvalue())
|
inwotep/lava-dispatcher | lava_dispatcher/tests/test_device_version.py | Python | gpl-2.0 | 3,007 | 0.00133 | # Copyright (C) 2012 Linaro Limited
#
# Author: Antonio Terceiro <antonio.terceiro@linaro.org>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses>.
import re
from lava_dispatcher.tests.helper import LavaDispatcherTestCase, create_device_config, create_config
import os
from lava_dispatcher.device.target import Target
from lava_dispatcher.device.qemu import QEMUTarget
from lava_dispatcher.device.fastmodel import FastModelTarget
from lava_dispatcher.context import LavaContext
from lava_dispatcher.config import get_config
def _create_fastmodel_target():
    """Build a FastModelTarget backed by a minimal generated device config."""
    config = create_device_config('fastmodel01', {'device_type': 'fastmodel',
                                                  'simulator_binary': '/path/to/fastmodel',
                                                  'license_server': 'foo.local'})
    target = FastModelTarget(None, config)
    return target
def _create_qemu_target(extra_device_config=None):
    """Build a QEMUTarget backed by a minimal generated config.

    :param extra_device_config: optional dict merged over the base qemu
        device configuration.  (Default changed from a mutable ``{}`` to
        ``None`` to avoid the shared-mutable-default pitfall; behavior for
        all callers is unchanged.)
    """
    create_config('lava-dispatcher.conf', {})
    device_config_data = {'device_type': 'qemu'}
    device_config_data.update(extra_device_config or {})
    device_config = create_device_config('qemu01', device_config_data)
    dispatcher_config = get_config()
    context = LavaContext('qemu01', dispatcher_config, None, None, None)
    return QEMUTarget(context, device_config)
class TestDeviceVersion(LavaDispatcherTestCase):
    """Sanity checks for Target.get_device_version implementations."""
    def test_base(self):
        target = Target(None, None)
        self.assertIsInstance(target.get_device_version(), str)
    def test_qemu(self):
        # fake-qemu is a stub shipped in the test-config tree that prints a
        # version string like a real qemu binary would.
        fake_qemu = os.path.join(os.path.dirname(__file__), 'test-config', 'bin', 'fake-qemu')
        target = _create_qemu_target({'qemu_binary': fake_qemu})
        device_version = target.get_device_version()
        assert(re.search('^[0-9.]+', device_version))
class TestDevice(LavaDispatcherTestCase):
    """Tests for Target._boot_cmds_preprocessing (empty entries stripped)."""
    def setUp(self):
        super(TestDevice, self).setUp()
        self.target = Target(None, None)
    def test_boot_cmds_preprocessing_empty_list(self):
        boot_cmds = []
        expected = []
        return_value = self.target._boot_cmds_preprocessing(boot_cmds)
        self.assertEqual(return_value, expected)
    def test_boot_cmds_preprocessing(self):
        boot_cmds = ["foo", "bar", ""]
        expected = ["foo", "bar"]
        return_value = self.target._boot_cmds_preprocessing(boot_cmds)
        self.assertEqual(return_value, expected)
|
dreams6/pyerpcn | pyerp/fnd/api/user.py | Python | gpl-3.0 | 1,030 | 0.010373 | # -*- coding: utf-8 -*-
"""
这个模块实现了用户管理相关功能。
create_user() 用于创建用户。
为指定用户绑定职责。
"""
from datetime im | port datetime, date
from pyerp.fnd import models
from pyerp.fnd.gbl import fnd_global
from pyerp.fnd.utils.version import get_svn_revision, get_version
__svnid__ = '$Id: user.py 98 2010-02-01 13:52:36Z yuhere $'
__svn__ = get_svn_revision(__name__)
def create_user(username, password, description, email,
pwd | _expiration_type=0, pwd_lifespan=0,
start_date_active=date.today(), end_date_active=None):
user = models.User()
user.username = username
user.set_password(password)
user.description = description
user.email = email
user.fax = None
user.pwd_expiration_type = pwd_expiration_type # 0:none 1:days 2:accesses
user.pwd_lifespan = pwd_lifespan
user.start_date_active = start_date_active
user.end_date_active = end_date_active
user.save()
return user
|
ypwalter/evennia | evennia/utils/eveditor.py | Python | bsd-3-clause | 26,775 | 0.001793 | """
EvEditor (Evennia Line Editor)
This implements an advanced line editor for editing longer texts
in-game. The editor mimics the command mechanisms of the "VI" editor
(a famous line-by-line editor) as far as reasonable.
Features of the editor:
- undo/redo.
- edit/replace on any line of the buffer.
- search&replace text anywhere in buffer.
- formatting of buffer, or selection, to certain width + indentations.
- allow to echo the input or not, depending on your client.
To use the editor, just import EvEditor from this module
and initialize it:
from evennia.utils.eveditor import EvEditor
EvEditor(caller, loadfunc=None, savefunc=None, quitfunc=None, key="")
- caller is the user of the editor, the one to see all feedback.
- loadfunc(caller) is called when the editor is first launched; the
return from this function is loaded as the starting buffer in the
editor.
- safefunc(caller, buffer) is called with the current buffer when
saving in the editor. The function should return True/False depending
on if the saving was successful or not.
- quitfunc(caller) is called when the editor exits. If this is given,
no automatic quit messages will be given.
- key is an optional identifier for the editing session, to be
displayed in the editor.
"""
import re
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils import is_iter, fill, dedent
from evennia.commands import cmdhandler
# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT
_RE_GROUP = re.compile(r"\".*?\"|\'.*?\'|\S*")
# use NAWS in the future?
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
#------------------------------------------------------------
#
# texts
#
#------------------------------------------------------------
_HELP_TEXT = \
"""
<txt> - any non-command is appended to the end of the buffer.
: <l> - view buffer or only line <l>
:: <l> - view buffer without line numbers or other parsing
::: - print a ':' as the only character on the line...
:h - this help.
:w - save the buffer (don't quit)
:wq - save buffer and quit
:q - quit (will be asked to save if buffer was changed)
:q! - quit without saving, no questions asked
:u - (undo) step backwards in undo history
:uu - (redo) step forward in undo history
:UU - reset all changes back to initial state
:dd <l> - delete line <n>
:dw <l> <w> - delete word or regex <w> in entire buffer or on line <l>
:DD - clear buffer
:y <l> - yank (copy) line <l> to the copy buffer
:x <l> - cut line <l> and store it in the copy buffer
:p <l> - put (paste) previously copied line directly after <l>
:i <l> <txt> - insert new text <txt> at line <l>. Old line will move down
:r <l> <txt> - replace line <l> with text <txt>
:I <l> <txt> - insert text at the beginning of line <l>
:A <l> <txt> - append text after the end of line <l>
:s <l> <w> <txt> - search/replace word or regex <w> in buffer or on line <l>
:f <l> - flood-fill entire buffer or line <l>
:fi <l> - indent entire buffer or line <l>
:fd <l> - de-indent entire buffer or line <l>
:echo - turn echoing of the input on/off (helpful for some clients)
Legend:
<l> - line numbers, or range lstart:lend, e.g. '3:7'.
<w> - one word or several enclosed in quotes.
<txt> - longer string, usually not needed to be enclosed in quotes.
"""
_ERROR_LOADFUNC = \
"""
{error}
{rBuffer load function error. Could not load initial data.{n
"""
_ERROR_SAVEFUNC = \
"""
{error}
{rSave function returned an error. Buffer not saved.{n
"""
_ERROR_NO_SAVEFUNC = "{rNo save function defined. Buffer cannot be saved.{n"
_MSG_SAVE_NO_CHANGE = "No changes need saving"
_DEFAULT_NO_QUITFUNC = "Exited editor."
_ERROR_QUITFUNC = \
"""
{error}
{rQuit function gave an error. Skipping.{n
"""
_MSG_NO_UNDO = "Nothing to undo"
_MSG_NO_REDO = "Nothing to redo"
_MSG_UNDO = "Undid one step."
_MSG_REDO = "Redid one step."
#------------------------------------------------------------
#
# Handle yes/no quit question
#
#------------------------------------------------------------
class CmdSaveYesNo(Command):
    """
    Save the editor state on quit. This catches
    nomatches (defaults to Yes), and avoids saving only if the
    command was given specifically as "no" or "n".
    """
    key = _CMD_NOMATCH
    aliases = _CMD_NOINPUT
    locks = "cmd:all()"
    help_category = "LineEditor"  # fixed typo: was 'help_cateogory'

    def func(self):
        """Implement the yes/no choice."""
        # this is only called from inside the lineeditor
        # so caller.ndb._lineeditor must be set.
        self.caller.cmdset.remove(SaveYesNoCmdSet)
        if self.raw_string.strip().lower() in ("no", "n"):
            # answered no: quit without saving
            self.caller.msg(self.caller.ndb._lineeditor.quit())
        else:
            # answered yes (default): save, then quit
            self.caller.ndb._lineeditor.save_buffer()
            self.caller.ndb._lineeditor.quit()
class SaveYesNoCmdSet(CmdSet):
"Stores the yesno question"
key = "quitsave_yesno"
priority = 1
mergetype = "Replace"
def at_cmdset_creation(self):
"at cmdset creation"
self.add(CmdSaveYesNo())
#------------------------------------------------------------
#
# Editor commands
#
#------------------------------------------------------------
class CmdEditorBase(Command):
"""
Base parent for editor commands
"""
locks = "cmd:all()"
help_entry = "LineEditor"
code = None
editor = None
def parse(self):
"""
Handles pre-parsing
Editor commands are on the form
:cmd [li] [w] [txt]
Where all arguments are optional.
li - line number (int), starting from 1. This could also
be a range given as <l>:<l>.
w - word(s) (string), could be encased in quotes.
txt - extra text (string), could be encased in quotes.
"""
linebuffer = []
if self.editor:
linebuffer = self.editor.get_buffer().split("\n")
nlines = len(linebuffer)
# The regular expression will split the line by whitespaces,
# stripping extra whitespaces, except if the text is
# surrounded by single- or double quotes, in which case they
# will be kept together and extra whitespace preserved. You
# can input quotes on the line by alternating single and
# double quotes.
arglist = [part for part in _RE_GROUP.findall(self.args) if part]
temp = []
for arg in arglist:
# we want to clean the quotes, but only one type,
# in case we are nesting.
if arg.startswith('"'):
arg.strip('"')
elif arg.startswith("'"):
arg.strip("'")
temp.append(arg)
arglist = temp
# A dumb split, without grouping quotes
words = self.args.split()
# current line number
cline = nlines - 1
# the first argument could also be a range of line numbers, on the
# form <lstart>:<lend>. Either of the ends could be missing, to
# mean start/end of buffer respectively.
lstart, lend = cline, cline + 1
linerange = False
if arglist and ':' in arglist[0]:
part1, | part2 = arglist[0].split(':')
if part1 and part1.isdigit(): |
lstart = min(max(0, int(part1)) - 1, nlines)
linerange = True
if part2 and part2.isdigit():
lend = min(lstart + 1, int(part2)) + 1
linerange = True
elif arglist and arglist[0].isdigit():
lstart = min(max(0, int(arglist[0]) - 1), nlines)
lend = lstart + 1
linerange = True
if linerange:
arglist = arglist[1:]
# nicer output formatting of the line range.
lstr = ""
if not linerange or lstart + 1 == lend:
lstr = "line %i" % (lstart + 1)
else:
|
dl1ksv/gnuradio | gnuradio-runtime/python/pmt/__init__.py | Python | gpl-3.0 | 1,351 | 0.00148 | #
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
# The presence of this file turns this directory into a Python package
'''
Poly | morphic Types.
The type can really be used to store anything, but also has simple
conversion methods for common data types such as bool, long, or | a
vector.
The polymorphic type simplifies message passing between blocks, as all
of the data is of the same type, including the message. Tags also use
PMTs as data type, so a stream tag can be of any logical data type. In
a sense, PMTs are a way to extend C++' strict typing with something
more flexible.
The PMT library supports the following major types:
bool, symbol (string), integer, real, complex, null, pair, list,
vector, dict, uniform_vector, any (boost::any cast)
'''
import os
try:
from .pmt_python import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "bindings"))
from .pmt_python import *
# due to changes in the PMT_NIL singleton for static builds, we force
# this into Python here.
PMT_NIL = get_PMT_NIL()
PMT_T = get_PMT_T()
PMT_F = get_PMT_F()
PMT_EOF = get_PMT_EOF()
from .pmt_to_python import pmt_to_python as to_python
from .pmt_to_python import python_to_pmt as to_pmt
|
franramirez688/common | test/edition/changevalidator_test.py | Python | mit | 2,188 | 0.000914 | import unittest
from biicode.common.edition.hive import Hive
from biicode.common.model.blob import Blob
from biicode.common.exception import BiiException
from biicode.common.edition import changevalidator
from biicode.common.edition.processors.processor_changes import ProcessorChanges
from biicode.common.conf import BII_FILE_SIZE_LIMIT, BII_HIVE_NUMFILES_LIMIT
from biicode.common.edit | ion.changevalidator import BII_FILE_SIZE_LIMIT_STR
from biicode.common.output_stream import OutputStream
from biicode.common.model.content import Content
class ChangeValidatorTest(unittest.TestCase):
def setUp(self):
self.load = Blob()
def te | st_large_cell_reject(self):
self.load.binary = bytearray(BII_FILE_SIZE_LIMIT)
files = {"user/block/file": (None, self.load)}
biiout = OutputStream()
changevalidator.remove_large_cells(files, biiout)
self.assertEquals(0, len(files))
self.assertEquals("WARN: File user/block/file is bigger "
"than %s: discarded\n" % BII_FILE_SIZE_LIMIT_STR,
str(biiout))
def test_size_reject_accept(self):
self.load.binary = bytearray(BII_FILE_SIZE_LIMIT)
load2 = Blob()
load2.binary = bytearray(BII_FILE_SIZE_LIMIT - 1)
files = {"user/block/filelarge": (None, self.load),
"user/block/filesmall": (None, load2)}
biiout = OutputStream()
changevalidator.remove_large_cells(files, biiout)
self.assertEquals(1, len(files))
self.assertEquals("WARN: File user/block/filelarge is "
"bigger than %s: discarded\n" % BII_FILE_SIZE_LIMIT_STR,
str(biiout))
self.assertIn("user/block/filesmall", files)
def test_hive_num_files_reject(self):
with self.assertRaises(BiiException):
hive = Hive()
changes = ProcessorChanges()
for i in xrange(BII_HIVE_NUMFILES_LIMIT + 1):
name = "user/block/file%d" % i
changes.upsert(name, Content(id_=name, load=Blob()))
hive.update(changes)
changevalidator.check_hive_num_cells(hive)
|
ifduyue/sentry | src/sentry/plugins/sentry_interface_types/models.py | Python | bsd-3-clause | 998 | 0.001002 | """
sentry.plugins.sentry_interface_types.models
~~~~~~~~~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
import sentry
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class InterfaceTypePlugin(TagPlugin):
    """
    Automatically adds the 'interface_type' tag to events, referencing
    the class name of each interface (e.g. Http, Stacktrace, Exception).
    """
    description = __doc__  # fixed typo: was 'descrption'
    slug = 'interface_types'
    title = 'Auto Tag: Interface Types'
    version = sentry.VERSION
    author = "Sentry Team"
    author_url = "https://github.com/getsentry/sentry"
    tag = 'interface_type'
    project_default_enabled = False

    def get_tag_values(self, event):
        # tag with the bare class name of every interface present on the
        # event, e.g. 'sentry.interfaces.Http' -> 'Http'
        return [i.rsplit('.', 1)[-1] for i in six.iterkeys(event.interfaces)]


register(InterfaceTypePlugin)
|
ajkxyz/cuda4py | src/cuda4py/blas/__init__.py | Python | bsd-2-clause | 2,953 | 0.000339 | """
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: Alexey Kazantsev <a.kazantsev@samsung.com>
"""
"""
Init module for BLAS cffi bindings and helper classes.
"""
from cuda4py.blas._cublas import (CUBLAS,
initialize,
CUBLAS_OP_N,
CUBLAS_OP_T,
CUBLAS_OP_C,
CUBLAS_DATA_FLOAT,
CUBLAS_DATA_DOUBLE,
CUBLAS_DATA_HALF,
CUBLAS_DATA_INT8,
CUBLAS_POINTER_MODE_HOST,
CUBLAS_POINTER_MODE_DEVICE,
CUBLAS_STATUS_SUCCESS,
CUBLAS_STATUS_NOT_INITIALIZED,
CUBLAS_STATUS_ALLOC_FAILED,
CUBLAS_STATUS_INVALID_VALUE,
CUBLAS_STATUS_ARCH_MISMATCH,
CUBLAS_STATUS_MAPPING_ERROR,
CUBLAS_STATUS_EXECUTION_FAILED,
CUBLAS_STATUS_INTERNAL_ERROR,
CUBLAS_STATUS_NOT_SUPPORTED,
CUBLAS_STATUS_LICENSE_ERROR)
|
srvasn/basic-chat-server | constants.py | Python | gpl-3.0 | 513 | 0 | # Declaring constants for use throughout the program
# chat protocol states
STATE_AUTH = 'AUTH'
STATE_CHAT = 'CHAT'

# authentication actions
SIGN_UP = 'SIGN_UP'
# NOTE(review): value 'REGISTER' looks inconsistent with the name LOGIN --
# confirm against the protocol before changing it
LOGIN = 'REGISTER'
EXIT = 'EXIT'
EXIT_COMMAND = '.quit'  # This is what the user types when he/she wants to quit

DB_URL = 'storage.db'
PORT = 1236
LOG_FILE_URL = 'chatserver.log'

# fixtures used by the simulation/tests
TEST_USER_FILE = 'sim_users.json'
TEST_MESSAGES = ["Sample Message 1",
                 "Sample Message 2",
                 "Sample Message 3",
                 "Sample Message 4",
                 "Sample Message 5"]
orbeckst/RecSQL | recsql/csv_table.py | Python | gpl-3.0 | 4,336 | 0.003921 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# RecSQL -- a simple mash-up of sqlite and numpy.recsql
# Copyright (C) 2007-2016 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License, version 3 or higher (your choice)
"""
:mod:`recsql.csv_table` --- Parse a simple CSV table
====================================================
Turn a CSV table into a numpy array.
Uses :mod:`csv` (requires python 2.6 or better).
.. autoclass:: Table2array
:members: __init__, recarray
.. autofunction:: make_python_name
"""
from __future__ import with_statement, absolute_import
# notes on csv (from http://farmdev.com/talks/unicode/)
# encode temp. to utf-8
# s_bytes = s_uni.encode('utf-8')
# do stuff
# s_bytes.decode('utf-8')
try:
# needs python >= 2.6
import csv
except ImportError:
import warnings
warnings.warn("csv module not available (needs python >=2.6)", category=ImportWarning)
# ... just go ahead and fail later miserably ...
import numpy
import re
from .convert import Autoconverter
# from the csv examples: http://docs.python.org/library/csv.html#csv-examples
import codecs
class UTF8Recoder(object):
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8.

    Needed because the Python 2 csv module can only consume byte streams.
    """
    def __init__(self, f, encoding):
        # decode the raw byte stream using the caller-supplied encoding
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: re-encode each decoded line to UTF-8
        return self.reader.next().encode("utf-8")
class UnicodeReader(object):
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding.

    (Python 2 only: yields rows of ``unicode`` strings.)
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # recode the input to UTF-8 first -- the py2 csv module cannot
        # consume unicode streams directly
        f = UTF8Recoder(f, encoding)
        self.reader = csv.reader(f, dialect=dialect, **kwds)

    def next(self):
        # decode the UTF-8 byte fields produced by csv back into unicode
        row = self.reader.next()
        return [unicode(s, "utf-8") for s in row]

    def __iter__(self):
        return self
def make_python_name(s, default=None, number_prefix='N', encoding="utf-8"):
    """Returns a unicode string that can be used as a legal python identifier.

    (Python 2 only: the result is built with ``unicode``.)

    :Arguments:
       *s*
          string
       *default*
          use *default* if *s* is ``None``
       *number_prefix*
          string to prepend if *s* starts with a number
    """
    if s in ('', None):
        s = default
    s = str(s)
    # replace every character that is not legal in an identifier
    s = re.sub(r"[^a-zA-Z0-9_]", "_", s)
    # identifiers may not start with a digit; raw strings fix the invalid
    # '\d' escape and the 'not ... is None' phrasing of the original
    if re.match(r"\d", s) is not None:
        s = number_prefix + s
    return unicode(s, encoding)
class Table2array(object):
    """Read a csv file and provide conversion to a :class:`numpy.recarray`.

    * Depending on the arguments, autoconversion of values can take
      place. See :class:`recsql.convert.Autoconverter` for details.
    * Table column headers are always read from the first row of the file.
    * Empty rows are discarded.
    """
    def __init__(self, filename=None, tablename="CSV", encoding="utf-8", **kwargs):
        """Initialize the class.

        :Arguments:
           *filename*
              CSV file (encoded with *encoding*)
           *name*
              name of the table
           *autoconvert*
              EXPERIMENTAL. ``True``: replace certain values
              with special python values (see :class:`convert.Autoconverter`) and possibly
              split values into lists (see *sep*).
              ``False``: leave everything as it is (numbers as numbers and strings
              as strings).
           *mode*
              mode of the :class:`~convert.Autoconverter`

        :Raises: :exc:`TypeError` if *filename* is not provided.
        """
        if filename is None:
            raise TypeError("filename is actually required")
        self.tablename = tablename
        # remaining kwargs configure the per-value converter
        self.autoconvert = Autoconverter(**kwargs).convert
        csvtab = UnicodeReader(open(filename, "rb"), encoding=encoding)
        # first row holds the column headers; sanitize each into a legal
        # python identifier, falling back to the column index
        self.names = [make_python_name(s,default=n,encoding=encoding) for n,s in enumerate(csvtab.next())]
        # read the rest after the column headers
        # (rows that are empty or all-empty-strings are discarded)
        self.records = [tuple(map(self.autoconvert, line)) for line in csvtab \
                            if len(line) > 0 and not numpy.all(numpy.array(line) == '')]

    def recarray(self):
        """Returns data as :class:`numpy.recarray`."""
        return numpy.rec.fromrecords(self.records, names=self.names)
|
letolab/airy | airy/utils/unittest/signals.py | Python | bsd-2-clause | 1,682 | 0.002973 | import signal
import weakref
from airy.utils.unittest.compatibility import wraps
__unittest = True
class _InterruptHandler(object):
    """SIGINT handler that stops all registered test results.

    Wraps the previously installed handler; a signal received while we are
    not the installed handler, or a second Ctrl-C, is delegated to it.
    """
    def __init__(self, default_handler):
        self.called = False
        self.default_handler = default_handler

    def __call__(self, signum, frame):
        installed_handler = signal.getsignal(signal.SIGINT)
        if installed_handler is not self:
            # if we aren't the installed handler, then delegate immediately
            # to the default handler
            self.default_handler(signum, frame)

        if self.called:
            # second interrupt: fall back to the original behaviour
            self.default_handler(signum, frame)
        self.called = True
        # ask every registered result to stop gracefully
        for result in _results.keys():
            result.stop()
# Registered result objects; weak keys so results are dropped automatically
# once the test framework releases them.
_results = weakref.WeakKeyDictionary()


def registerResult(result):
    """Register *result* so a SIGINT will call its ``stop()``."""
    _results[result] = 1


def removeResult(result):
    """Deregister *result*; return True if it had been registered."""
    return bool(_results.pop(result, None))
# module-level singleton: the currently installed SIGINT wrapper, or None
# if installHandler() has not been called yet
_interrupt_handler = None


def installHandler():
    """Install the SIGINT handler that stops registered results.

    Idempotent: the wrapper is created and installed only once; the handler
    that was active beforehand is kept inside it for delegation.
    """
    global _interrupt_handler
    if _interrupt_handler is None:
        default_handler = signal.getsignal(signal.SIGINT)
        _interrupt_handler = _InterruptHandler(default_handler)
        signal.signal(signal.SIGINT, _interrupt_handler)
def removeHandler(method=None):
    """Uninstall the SIGINT handler installed by installHandler().

    Dual use:

    * called directly (``method is None``) it restores the original
      SIGINT handler;
    * used as a decorator it disables the custom handler for the duration
      of each call to *method* and restores the prior handler afterwards.
    """
    if method is not None:
        # decorator form
        @wraps(method)
        def inner(*args, **kwargs):
            initial = signal.getsignal(signal.SIGINT)
            removeHandler()
            try:
                return method(*args, **kwargs)
            finally:
                # always restore whatever handler was active on entry
                signal.signal(signal.SIGINT, initial)
        return inner

    global _interrupt_handler
    if _interrupt_handler is not None:
        # NOTE(review): _interrupt_handler is not reset to None here, so a
        # later installHandler() call is a no-op -- confirm this is intended
        signal.signal(signal.SIGINT, _interrupt_handler.default_handler)
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/conf/locale/es_PR/formats.py | Python | mit | 738 | 0 | # | -*- encoding: utf-8 - | *-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Django format strings: a backslash escapes the following letter so it is
# emitted literally, hence \d\e renders as "de" (e.g. "31 de diciembre de 2009").
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\a\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0  # Sunday

# strptime-style formats accepted when parsing user input
DATE_INPUT_FORMATS = (
    # '31/12/2009', '31/12/09'
    '%d/%m/%Y', '%d/%m/%y'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',
    '%d/%m/%Y %H:%M:%S.%f',
    '%d/%m/%Y %H:%M',
    '%d/%m/%y %H:%M:%S',
    '%d/%m/%y %H:%M:%S.%f',
    '%d/%m/%y %H:%M',
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
ales-erjavec/orange-bio | orangecontrib/bio/widgets3/OWGeneNetwork.py | Python | gpl-3.0 | 16,602 | 0.000723 | import sys
from collections import namedtuple
from AnyQt.QtWidgets import QSizePolicy, QLayout
from AnyQt.QtCore import Slot
import Orange.data
from Orange.widgets.utils.datacaching import data_hints
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import itemmodels
from Orange.widgets.utils.concurrent import ThreadExecutor, Task, methodinvoke
from orangecontrib import network
from .. import ppi, taxonomy, gene
from ..utils import serverfiles, compat
Source = namedtuple(
"Source",
["name", "constructor", "tax_mapping", "sf_domain", "sf_filename",
"score_filter"]
)
SOURCES = [
Source("BioGRID", ppi.BioGRID, ppi.BioGRID.TAXID_MAP,
"PPI", ppi.BioGRID.SERVER_FILE, False),
Source("STRING", ppi.STRING, ppi.STRING.TAXID_MAP,
"PPI", ppi.STRING.FILENAME, True)
]
class OWGeneNetwork(widget.OWWidget):
name = "Gene Network"
description = "Extract a gene network for a set of genes."
icon = "../widgets/icons/GeneNetwork.svg"
inputs = [("Data", Orange.data.Table, "set_data")]
outputs = [("Network", network.Graph)]
settingsHandler = settings.DomainContextHandler()
taxid = settings.Setting("9606")
gene_var_index = settings.ContextSetting(-1)
use_attr_names = settings.ContextSetting(False)
network_source = settings.Setting(1)
include_neighborhood = settings.Setting(True)
min_score = settings.Setting(0.9)
want_main_area = False
def __init__(self, parent=None):
super().__init__(parent)
self.taxids = taxonomy.common_taxids()
self.current_taxid_index = self.taxids.index(self.taxid)
self.data = None
self.geneinfo = None
self.nettask = None
self._invalidated = False
box = gui.widgetBox(self.controlArea, "Info")
self.info = gui.widgetLabel(box, "No data on input\n")
box = gui.widgetBox(self.controlArea, "Organism")
self.organism_cb = gui.comboBox(
box, self, "current_taxid_index",
items=map(taxonomy.name, self.taxids),
callback=self._update_organism
)
box = gui.widgetBox(self.controlArea, "Genes")
self.genes_cb = gui.comboBox(
box, self, "gene_var_index", callback=self._update_query_genes
)
self.varmodel = itemmodels.VariableListModel()
self.genes_cb.setModel(self.varmodel)
gui.checkBox(
box, self, "use_attr_names",
"Use attribute names",
callback=self._update_query_genes
)
box = gui.widgetBox(self.controlArea, "Network")
gui.comboBox(
box, self, "network_source",
items=[s.name for s in SOURCES],
callback=self._on_source_db_changed
)
gui.checkBox(
box, self, "include_neighborhood",
"Include immediate gene neighbors",
callback=self.invalidate
)
self.score_spin = gui.doubleSpin(
box, self, "min_score", 0.0, 1.0, step=0.001,
label="Minimal edge score",
callback=self.invalidate
)
self.score_spin.setEnabled(SOURCES[self.network_source].score_filter)
box = gui.widgetBox(self.controlArea, "Commit")
gui.button(box, self, "Retrieve", callback=self.commit, default=True)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.layout().setSizeConstraint(QLayout.SetFixedSize)
self.executor = ThreadExecutor()
def set_data(self, data):
self.closeContext()
self.data = data
if data is not None:
self.varmodel[:] = string_variables(data.domain)
taxid = data_hints.get_hint(data, "taxid", default=self.taxid)
if taxid in self.taxids:
self.set_organism(self.taxids.index(taxid))
self.use_attr_names = data_hints.get_hint(
data, "genesinrows", default=self.use_attr_names
)
if not (0 <= self.gene_var_index < len(self.varmodel)):
self.gene_var_index = len(self.varmodel) - 1
self.openContext(data)
self.invalidate()
self.commit()
else:
self.varmodel[:] = []
self.send("Network", None)
def set_source_db(self, dbindex):
self.network_source = dbindex
self.invalidate()
def set_organism(self, index):
self.current_taxid_index = index
self.taxid = self.taxids[index]
self.invalidate()
def set_gene_var(self, index):
self.gene_var_index = index
self.invalidate()
def query_genes(self):
if self.use_attr_names:
if self.data is not None:
return [var.name for var in self.data.domain.attributes]
else:
return []
elif self.gene_var_index >= 0:
var = self.varmodel[self.gene_var_index]
genes = [str(inst[var]) for inst in self.data
if not compat.isunknown(inst[var])]
return list(unique(genes))
else:
return []
def invalidate(self):
self._invalidated = True
if self.nettask is not None:
self.nettask.finished.disconnect(self._on_result_ready)
self.nettask.future().cancel()
self.nettask = None
@Slot()
def advance(self):
self.progressBarValue = (self.progressBarValue + 1) % 100
@Slot(float)
def set_progress(self, value):
self.progressBarSet(value, processEvents=None)
def commit(self):
include_neighborhood = self.include_neighborhood
query_genes = self.query_genes()
source = SOURCES[self.network_source]
if source.score_filter:
min_score = self.min_score
assert source.name == "STRING"
min_score = min_score * 1000
else:
min_score = None
taxid = self.taxid
progress = methodinvoke(self, "advance")
if self.geneinfo is None:
self.geneinfo = self.executor.submit(
fetch_ncbi_geneinfo, taxid, progress
)
geneinfo_f = self.geneinfo
taxmap = source.tax_mapping
db_taxid = taxmap.get(taxid, taxid)
if db_taxid is None:
raise ValueError("invalid taxid for this network")
def fetch_network():
geneinfo = geneinfo_f.result()
ppidb = fetch_ppidb(source, db_taxid, progress)
return get_gene_network(ppidb, geneinfo, db_taxid, query_genes,
| include_neighborhood=include_neighborhood,
min_score=min_score,
progress=methodinvoke(self, "set_progress", (float,)))
self.nettask = Task(function=fetch_network)
self.nettask.finished.connect(self._on_result_ready)
self.executor.submit(self.nettask)
self.setBlocking(True)
self.setEnabled(False)
self.progressBarInit()
self._invalidated | = False
self._update_info()
@Slot()
def _on_result_ready(self,):
self.progressBarFinished()
self.setBlocking(False)
self.setEnabled(True)
net = self.nettask.result()
self._update_info()
self.send("Network", net)
def _on_source_db_changed(self):
source = SOURCES[self.network_source]
self.score_spin.setEnabled(source.score_filter)
self.invalidate()
def _update_organism(self):
self.taxid = self.taxids[self.current_taxid_index]
if self.geneinfo is not None:
self.geneinfo.cancel()
self.geneinfo = None
self.invalidate()
def _update_query_genes(self):
self.invalidate()
def _update_info(self):
if self.data is None:
self.info.setText("No data on input\n")
else:
names = self.query_genes()
lines = ["%i unique genes on input" % len(set(names))]
if self.nettask is not None:
if not self.nettask.future().done():
|
ClearCorp/odoo-clearcorp | account_payment_report/report/account_payment_report.py | Python | agpl-3.0 | 2,163 | 0 | # -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import re
from openerp.report import report_sxw
from openerp import models, _
from openerp.exceptions import Warning
CURRENCY_NAMES = {
'USD': {
'en': 'Dollars',
'es': 'DOLARES',
},
'EUR': {
'en': 'Euros',
'es': 'EUROS',
},
'CRC': {
'en': 'Colones',
'es': 'COLONES',
}
}
class ReportAccountPayment(report_sxw.rml_parse):
    """RML parser exposing an amount-in-words helper to the payment report."""

    def __init__(self, cr, uid, name, context=None):
        super(ReportAccountPayment, self).__init__(cr, uid, name,
                                                   context=context)
        self.localcontext.update({
            'get_text_amount': self._get_text_amount,
        })

    def _get_currency_name(self, currency_id):
        """Return the localized display name for *currency_id*.

        :raises Warning: if the currency is not in CURRENCY_NAMES.
        """
        currency_obj = self.pool.get('res.currency')
        currency = currency_obj.browse(self.cr, self.uid, currency_id)
        lang = self.localcontext.get('lang')
        # NOTE(review): 'lang' is typically a full locale code (e.g. 'es_CR')
        # while CURRENCY_NAMES is keyed by bare 'en'/'es', so this lookup may
        # raise KeyError -- confirm before relying on this helper
        if currency.name in CURRENCY_NAMES:
            return CURRENCY_NAMES[currency.name][lang]
        raise Warning(_('Currency not supported by this report.'))

    def _get_text_amount(self, amount, currency_id):
        """Spell out *amount* in words in the report language (es/en)."""
        es_regex = re.compile('es.*')
        en_regex = re.compile('en.*')
        lang = self.localcontext.get('lang')
        if es_regex.match(lang):
            from openerp.addons.l10n_cr_amount_to_text import amount_to_text
            return amount_to_text.number_to_text_es(
                amount, '',
                join_dec=' Y ', separator=',', decimal_point='.')
        elif en_regex.match(lang):
            from openerp.tools import amount_to_text_en
            return amount_to_text_en.amount_to_text(
                amount, lang='en', currency='')
        else:
            raise Warning(_('Language not supported by this report.'))
class report_account_payment(models.AbstractModel):
    """Glue model binding the QWeb report template to its rml_parse parser."""
    _name = 'report.account_payment_report.account_payment_report'
    _inherit = 'report.abstract_report'
    _template = 'account_payment_report.account_payment_report'
    # parser class that supplies get_text_amount to the template context
    _wrapped_report_class = ReportAccountPayment
|
aipescience/daiquiri-admin | daiquiri/data.py | Python | apache-2.0 | 2,181 | 0.001834 | from daiquiri.exceptions import Dai | quiriException
class Data():
def __init__(self, connection, dryrun=False):
self.connection = connection
self.dryrun = dryrun
def | fetch_databases(self):
response = self.connection.get('/data/databases/')
if response['status'] != 'ok':
raise DaiquiriException(response['errors'])
else:
return response['databases']
def update_table(self, table_id, table):
if not self.dryrun:
response = self.connection.get('/data/tables/show/id/%s' % table_id)
data = {
'database_id': response['row']['database_id'],
'order': response['row']['order'],
'name': response['row']['name'],
'description': response['row']['description'],
'publication_role_id': response['row']['publication_role_id']
}
data.update(table)
response = self.connection.post('/data/tables/update/id/%s' % table_id, data)
if response['status'] != 'ok':
raise DaiquiriException(response['errors'])
def update_column(self, column_id, column):
if not self.dryrun:
response = self.connection.get('/data/columns/show/id/%s' % column_id)
data = {
'table_id': response['row']['table_id'],
'order': response['row']['order'],
'name': response['row']['name'],
'type': response['row']['type'],
'unit': response['row']['unit'],
'ucd': response['row']['ucd'],
'description': response['row']['description']
}
data.update(column)
response = self.connection.post('/data/columns/update/id/%s' % column_id, data)
if response['status'] != 'ok':
raise DaiquiriException(response['errors'])
def store_function(self, function):
if not self.dryrun:
response = self.connection.post('/data/functions/create', function)
if response['status'] != 'ok':
raise DaiquiriException(response['errors'])
|
rfhk/rqn-custom | purchase_order_sale_order_nrq/models/purchase_order.py | Python | agpl-3.0 | 349 | 0.002865 | # -*- coding: utf-8 -*-
# Copyright 2018 Quartile Limited
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class PurchaseOrder(models.Model):
    """Expose the sale orders related to a purchase order's lines."""
    _inherit = "purchase.order"

    # related Many2many through the order lines' sale_ids
    sale_ids = fields.Many2many(
        "sale.order",
        related="order_line.sale_ids",
        string="Related Sales Order(s)",
    )
|
a25kk/vfu | src/vfu.events/vfu/events/myform.py | Python | mit | 3,814 | 0.014421 | import random
import zope.schema
import zope.interface
from zope.i18nmessageid import MessageFactory
from zope.component import getUtility, getMultiAdapter
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile as Zope3PageTemplateFile
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile as FiveViewPageTemplateFile
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.interfaces import ISiteRoot
from Products.CMFPlone.utils import _createObjectByType
from Products.CMFPlone.interfaces.controlpanel import IMailSchema
from Products.statusmessages.interfaces import IStatusMessage
import z3c.form
import plone.z3cform.templates
from plone.registry.interfaces import IRegistry
from smtplib import SMTPException, SMTPRecipientsRefused
from vfu.events import MessageFactory as _
from vfu.events.utils import trusted
from vfu.events.registration import IBasicForm
class MyForm(z3c.form.form.Form):
""" Display event with form """
template = Zope3PageTemplateFile("templates/form.pt")
fields = z3c.form.field.Fields(IBasicForm)
ignoreContext = True
enable_unload_protection = False
output = None
### ! fieldeset
fields['gender'].widgetFactory = z3c.form.browser.radio.RadioFieldWidget
fields['pricing'].widgetFactory = z3c.form.browser.radio.RadioFieldWidget
def _redirect(self, target=''):
if not target:
portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')
target = portal_state.portal_url()
self.request.response.redirect(target)
@z3c.form.button.buttonAndHandler(_(u"Save"), name='submit')
def submit(self, action):
data, errors = self.extractData()
if errors:
self.status = _(u"Please correct errors")
return
folder = self.context
id = str(random.randint(0, 99999999))
new_obj = _createObjectByType("vfu.events.registration", folder, id, lastname = data['lastname'],
firstname = data['firstname'], gender = data['gender'], job = data['job'], organization = data['organization'],
email = data['email'], phone = data['phone'], street = data['street'], number = data['number'],
zipcode = data['zipcode'], city = data['city'], country = data['country'], pri | cing = data['pricing'],
comments = data['comments'])
portal = getToolByName(self, 'portal_url').getPortalObject()
encoding = portal.getProperty('email_charset', 'utf-8')
trusted_template = | trusted(portal.registration_email)
mail_text = trusted_template(
self, charset=encoding, reg_data = new_obj, event = self.context)
subject = self.context.translate(_(u"New registration"))
m_to = data['email']
## notify admin about new registration
if isinstance(mail_text, unicode):
mail_text = mail_text.encode(encoding)
host = getToolByName(self, 'MailHost')
registry = getUtility(IRegistry)
mail_settings = registry.forInterface(IMailSchema, prefix='plone')
m_from = mail_settings.email_from_address
try:
host.send(mail_text, m_to, m_from, subject=subject,
charset=encoding, immediate=True, msg_type="text/html")
except SMTPRecipientsRefused:
raise SMTPRecipientsRefused(
_(u'Recipient address rejected by server.'))
except SMTPException as e:
raise(e)
IStatusMessage(self.request).add(_(u"Submit complete"), type='info')
return self._redirect(target=self.context.absolute_url())
form_frame = plone.z3cform.layout.wrap_form(MyForm, index=FiveViewPageTemplateFile("templates/layout.pt")) |
paninski-lab/yass | tests/conftest.py | Python | apache-2.0 | 2,794 | 0 | import shutil
import tempfile
import numpy as np
import os
from os.path import getsize
import pytest
import yaml
from util import PATH_TO_TESTS, seed, dummy_predict_with_threshold
PATH_TO_ASSETS = os.path.join(PATH_TO_TESTS, 'assets')
PATH_TO_RETINA_DIR = os.path.join(PATH_TO_ASSETS, 'recordings', 'retina')
PATH_TO_RETINA_CONFIG_DIR = os.path.join(PATH_TO_RETINA_DIR, 'config')
@pytest.fixture(autouse=True)
def setup():
seed(0)
@pytest.fixture
def patch_triage_network(monkeypatch):
to_patch = 'yass.neuralnetwork.model.KerasModel.predict_with_threshold'
monkeypatch.setattr(to_patch, dummy_predict_with_threshold)
yield
def _path_to_config():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR, 'config.yaml')
def _data_info():
with open(_path_to_config()) as f:
d = yaml.load(f)
return d
@pytest.fixture()
def data_info():
return _data_info()
@pytest.fixture()
def data():
info = _data_info()['recordings']
path = os.path.join(PATH_TO_RETINA_DIR, 'data.bin')
d = np.fromfile(path, dtype=info['dtype'])
n_observations = int(getsize(path) / info['n_channels'] /
np.dtype(info['dtype']).itemsize)
d = d.reshape(n_observations, info['n_channels'])
return d
@pytest.fixture()
def path_to_tests():
return PATH_TO_TESTS
@pytest.fixture()
def path_to_performance():
return os.path.join(PATH_TO_TESTS, 'performance/')
@pytest.fixture
def make_tmp_folder():
temp = tempfile.mkdtemp()
yield temp
shutil.rmtree(temp)
@pytest.fixture()
def path_to_data():
return os.path.join(PATH_TO_RETINA_DIR, 'data.bin')
@pytest.fixture()
def path_to_geometry():
return os.path.join(PATH_TO_RETINA_DIR, 'geometry.npy')
@pytest.fixture()
def path_to_sample_pipeline_folder():
return os.path.join(PATH_TO_RETINA_DIR,
'sample_pip | eline_output')
@pytest.fixture()
def path_to_standardized_data | ():
return os.path.join(PATH_TO_RETINA_DIR,
'sample_pipeline_output', 'preprocess',
'standardized.bin')
@pytest.fixture()
def path_to_output_reference():
return os.path.join(PATH_TO_ASSETS, 'output_reference')
@pytest.fixture
def path_to_config():
return _path_to_config()
@pytest.fixture
def path_to_config_threshold():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR, 'config_threshold.yaml')
@pytest.fixture
def path_to_config_with_wrong_channels():
return os.path.join(PATH_TO_RETINA_CONFIG_DIR,
'wrong_channels.yaml')
@pytest.fixture
def path_to_txt_geometry():
return os.path.join(PATH_TO_ASSETS, 'test_files', 'geometry.txt')
@pytest.fixture
def path_to_npy_geometry():
return os.path.join(PATH_TO_ASSETS, 'test_files', 'geometry.npy')
|
johm/infoshopkeeper | popups/checkout.py | Python | gpl-2.0 | 2,958 | 0.008114 | # Copyright 2006 John Duda
# This file is part of Infoshopkeeper.
# Infoshopkeeper is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or any later version.
# Infoshopkeeper is distributed in the hope that it will be useful,
# but WITHO | UT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General | Public License
# along with Infoshopkeeper; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
from wxPython.wx import *
import os
import datetime
from objects.emprunt import Emprunt
from popups.members import AddMemberPanel, ShowMembersPanel
class CheckoutPopup(wxDialog):
def __init__(self, parent):
self.parent=parent
wxDialog.__init__(self, parent,-1,"Check out items")
self.mastersizer = wxBoxSizer(wxVERTICAL)
self.static1 = wxStaticText(self, -1, "Check out to :")
self.mastersizer.Add(self.static1)
self.notebook = wxNotebook(self, -1, style=wxNB_TOP)
self.new_member_panel = AddMemberPanel(parent=self.notebook, main_window=parent,
on_successful_add=self.Borrow, cancel=self.Close)
self.notebook.AddPage(self.new_member_panel, "New member")
self.show_member_panel = ShowMembersPanel(parent=self.notebook, main_window=parent, motherDialog=self, on_select=self.Borrow)
self.notebook.AddPage(self.show_member_panel, "Existing member")
self.mastersizer.Add(self.notebook)
self.SetSizer(self.mastersizer)
for i in self.parent.orderbox.items:
print i.database_id, "... ", i.id
#self.b = wxButton(self, -1, "Checkout", (15, 80))
#EVT_BUTTON(self, self.b.GetId(), self.Checkout)
#self.b.SetDefault()
self.mastersizer.SetSizeHints(self)
def Borrow(self, id):
borrower = self.parent.membersList.get(id)
print borrower
for i in self.parent.orderbox.items:
# Check if this work on sqlobject 0.7... I got
# lots of problem on 0.6.1, and itemID __isn't__
# defined in emprunt, which is plain weirdness
e = Emprunt(borrower = id, itemID=i.database_id)
print i.database_id
self.parent.orderbox.setBorrowed()
self.parent.orderbox.void()
self.Close()
def OnCancel(self,event):
self.EndModal(1)
def Checkout(self,event):
borrower=self.borrower.GetValue()
if len(borrower)>0:
today="%s" % datetime.date.today()
self.parent.orderbox.change_status(today+"-"+borrower)
self.parent.orderbox.void()
self.Close()
|
jerodg/hackerrank-python | python/02.Strings/04.FindAString/template.py | Python | mit | 209 | 0 | def c | ount_substring(string, sub_string):
return
if __name__ == '__main__':
string = input().strip()
sub_string = in | put().strip()
count = count_substring(string, sub_string)
print(count)
|
istartsev/aws_helper-git | tests/test_s3.py | Python | apache-2.0 | 2,939 | 0.000681 | import filecmp
import shutil
import errno
import os
from unittest import TestCase
import time
from faker import Faker
from aws_helper.s3 import s3_helper as s3
from tests.settings import S3Settings
class BaseTest(TestCase):
@classmethod
def _clearTestFolder(cls):
try:
shutil.rmtree(S3Settings.TEST_FOLDER)
except Exception as error:
print('Failed to clear TestFolder. The reason:', str(error))
@classmethod
def setUpClass(cls):
super().setUpClass()
# Clearing folder with test data
cls._clearTestFolder()
cls._client = s3.S3Helper(S3Settings.S3_ACCESS_KEY,
S3Settings.S3_SECRET_KEY, None, None)
@classmethod
def tearDownClass(cls):
cls._clearTestFolder()
super().tearDownClass()
@classmethod
def _isBucketExist(cls, bucket_name):
response = cls._client.list_buckets()
bl = [bucket['Name'] for bucket in response['Buckets']]
if bucket_name in bl:
return True
return False
class TestS3File(BaseTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Need to create a test bucket if it doesn't not exist
if cls._isBucketExist(S3Settings.BUCKET_NAME):
cls._client.create_bucket(S3Settings.BUCKET_NAME)
# Reinitialize _client with the preset bucket
cls._client = s3.S3Helper(S3Settings.S3_ACCESS_KEY,
S3Settings.S3_SECRET_KEY,
S3Settings.BUCKET_NAME)
cls._fake = Faker()
cls._filename = '%s/test.file' % S3Settings.TEST_FOLDER
if not os.path.exists(os.path.dirname(cls._filename)):
try:
os.makedirs(os.path.dirname(cls._filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with op | en(cls._filename, 'w') as file:
file.write(cls._fake.text())
def testUploadDownloadFile(self):
dw_filename = self._filename + '_dw'
self._client.upload_file(self._filename, self._filename)
| self._client.download_file_new(self._filename, dw_filename)
result = filecmp.cmp(self._filename, dw_filename)
self.assertTrue(result)git
class TestS3Bucket(BaseTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._bucket_name = 'TEST_AWS_Helper_bucket_%f' % time.time()
cls._client = s3.S3Helper(S3Settings.S3_ACCESS_KEY,
S3Settings.S3_SECRET_KEY)
def testCreateDeleteBucket(self):
self._client.create_bucket(self._bucket_name)
self.assertTrue(self._isBucketExist(self._bucket_name))
self._client.delete_bucket(self._bucket_name)
self.assertFalse(self._isBucketExist(self._bucket_name))
|
endlessm/chromium-browser | third_party/llvm/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py | Python | bsd-3-clause | 3,209 | 0 | """Test the SBCommandInterpreter APIs."""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class CommandInterpreterAPICase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break on inside main.cpp.
self.line = line_number('main.c', 'Hello world.')
@add_test_categories(['pyapi'])
def test_with_process_launch_api(self):
"""Test the SBCommandInterpreter APIs."""
self.build()
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Retrieve the associated command interpreter from our debugger.
ci = self.dbg.GetCommandInterpreter()
self.assertTrue(ci, VALID_COMMAND_INTERPRETER)
# Exercise some APIs....
self.assertTrue(ci.HasCommands())
self.assertTrue(ci.HasAliases())
self.assertTrue(ci.HasAliasOptions())
self.assertTrue(ci.CommandExists("breakpoint"))
self.assertTrue(ci.CommandExists("target"))
self.assertTrue(ci.CommandExists("platform"))
self.assertTrue(ci.AliasExists("file"))
self.assertTrue(ci.AliasExists("run"))
self.assertTrue(ci.AliasExists("bt"))
res = lldb.SBCommandReturnObject()
ci.HandleCommand("breakpoint set -f main.c -l %d" % self.line, res)
self.assertTrue(res.Succeeded())
ci.HandleCommand("process launch", res)
self.assertTrue(res.Succeeded())
# Boundary conditions should not crash lldb!
self.assertFalse(ci.CommandExists(None))
self.assertFalse(ci.AliasExists(None))
ci.HandleCommand(None, res)
self.assertFalse(res.Succeeded())
res.AppendMessage("Just appended a message.")
res.AppendMessage(None)
if self.TraceOn():
print(res)
process = ci.GetProcess()
self.assertTrue(process)
import lldbsuite.test.lldbutil as lldbutil
if process.GetState() != lldb.eStateStopped:
self.fail("Process should be in the 'stopped' state, "
"instead the actual state is: '%s'" %
lldbutil.state_type_to_str(process.GetState()))
if self.TraceOn():
lldbutil.print_stacktraces(process)
@add_test_categories(['pyapi'])
def test_command_output(self):
"""Test command output handling."""
ci = self.dbg.GetCommandInterpreter()
self.assertTrue(ci, VALID_ | COMMAND_INTERPRETER)
# T | est that a command which produces no output returns "" instead of
# None.
res = lldb.SBCommandReturnObject()
ci.HandleCommand("settings set use-color false", res)
self.assertTrue(res.Succeeded())
self.assertIsNotNone(res.GetOutput())
self.assertEquals(res.GetOutput(), "")
self.assertIsNotNone(res.GetError())
self.assertEquals(res.GetError(), "")
|
bernard357/shellbot | tests/updaters/test_elastic.py | Python | apache-2.0 | 2,830 | 0.000353 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from elasticsearch import ConnectionError
import gc
import logging
import mock
from multiprocessing import Process, Queue
import os
import sys
from shellbot import Context, Engine
from shellbot.events import Message
from shellbot.updaters import ElasticsearchUpdater
my_engine = Engine()
class UpdaterTests(unittest.TestCase):
def tearDown(self):
collected = gc.collect()
logging.info("Garbage collector: collected %d objects." % (collected))
def test_init(self):
logging.info('***** init')
u = ElasticsearchUpdater()
self.assertEqual(u.engine, None)
u = ElasticsearchUpdater(engine=my_engine)
self.assertEqual(u.engine, my_engine)
def test_on_init(self):
logging.info('***** on_init')
u = ElasticsearchUpdater()
self.assertEqual(u.host, None)
u = ElasticsearchUpdater(host=None)
self.assertEqual(u.host, None)
u = ElasticsearchUpdater(host='')
self.assertEqual(u.host, '')
u = ElasticsearchUpdater(host='elastic.acme.com')
self.assertEqual(u.host, 'elastic.acme.com')
def test_get_host(self):
logging.info('***** get_host')
u = ElasticsearchUpdater(engine=my_engine)
self.assertEqual(u.get_host(), 'localhost:9200')
u = ElasticsearchUpdater(engine=my_engine, host=None)
| self.assertEqual(u.get_host(), 'localhost:9200')
u = ElasticsearchUpdater(engine=my_engine, host='')
self.assertEqual(u.get_host(), 'localhost:9200')
u = ElasticsearchUpdater(engine=my_engine, host= | 'elastic.acme.com')
self.assertEqual(u.get_host(), 'elastic.acme.com')
def test_on_bond(self):
logging.info('***** on_bond')
u = ElasticsearchUpdater(host='this.does.not.exist')
with self.assertRaises(Exception):
u.on_bond(bot='*dummy')
def test_put(self):
logging.info('***** put')
class FakeDb(object):
def __init__(self):
self.expected = None
def index(self, index, doc_type, body):
assert index == 'shellbot'
assert doc_type == 'event'
assert body == self.expected
u = ElasticsearchUpdater()
u.db = FakeDb()
message_1 = Message({
'person_label': 'alice@acme.com',
'text': 'a first message',
})
u.db.expected = message_1.attributes
u.put(message_1)
message_2 = Message({
'person_label': 'bob@acme.com',
'text': 'a second message',
})
u.db.expected = message_2.attributes
u.put(message_2)
if __name__ == '__main__':
Context.set_logger()
sys.exit(unittest.main())
|
ekcs/congress | congress/tests/api/test_driver_model.py | Python | apache-2.0 | 3,604 | 0 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of t | he License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an | "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from congress.api.system import driver_model
from congress.api import webservice
from congress import harness
from congress.managers import datasource as datasource_manager
from congress.tests import base
from congress.tests import helper
class TestDriverModel(base.SqlTestCase):
def setUp(self):
super(TestDriverModel, self).setUp()
cfg.CONF.set_override(
'drivers',
['congress.tests.fake_datasource.FakeDataSource'])
self.cage = harness.create(helper.root_path())
self.datasource_mgr = datasource_manager.DataSourceManager
self.datasource_mgr.validate_configured_drivers()
req = {'driver': 'fake_datasource',
'name': 'fake_datasource'}
req['config'] = {'auth_url': 'foo',
'username': 'foo',
'password': 'password',
'tenant_name': 'foo'}
self.datasource = self.datasource_mgr.add_datasource(req)
self.engine = self.cage.service_object('engine')
self.api_system = self.cage.service_object('api-system')
self.driver_model = (
driver_model.DatasourceDriverModel("driver-model", {},
policy_engine=self.engine)
)
def tearDown(self):
super(TestDriverModel, self).tearDown()
def test_drivers_list(self):
context = {}
expected_ret = {"results": [
{
"description": "This is a fake driver used for testing",
"id": "fake_datasource"
}
]}
ret = self.driver_model.get_items({}, context)
self.assertEqual(expected_ret, ret)
def test_driver_details(self):
context = {
"driver_id": "fake_datasource"
}
expected_ret = {
"config": {
"auth_url": "required",
"endpoint": "(optional)",
"password": "required",
"poll_time": "(optional)",
"region": "(optional)",
"tenant_name": "required",
"username": "required"
},
"description": "This is a fake driver used for testing",
"id": "fake_datasource",
"module": "congress.tests.fake_datasource.FakeDataSource",
"secret": ["password"],
"tables": [{'columns': [
{'description': 'None', 'name': 'id'},
{'description': 'None', 'name': 'name'}],
'table_id': 'fake_table'}
]
}
ret = self.driver_model.get_item('fake_datasource', {}, context)
self.assertEqual(expected_ret, ret)
def test_invalid_driver_details(self):
context = {
"driver_id": "invalid-id"
}
self.assertRaises(webservice.DataModelException,
self.driver_model.get_item,
'invalid-id', {}, context)
|
olgamelnichuk/NGSPyEasy | examples/trivial/library/dockercmd.py | Python | gpl-2.0 | 2,283 | 0.001314 | #!/usr/bin/env python
import glob
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=True, default=None, type='str'),
image=dict(required=True, default=None, type='str'),
volumes=dict(required=False, default=[], type='list'),
environment=dict(required=False, default=[], type='list'),
working_dir=dict(required=False, default=None, type='str'),
secure=dict(required=False, default=True, type='bool'),
sudo=dict(required=False, default=True, type='bool'),
rm=dict(required=False, default=True, type='bool'),
creates=dict(required=False, default=[], type='list')
)
)
command = module.params['command']
image = module.params['image']
volumes = module.params['volumes']
environment = module.params['environment']
working_dir = module.params['working_dir']
secure = module.params['secure']
sudo = module.params['sudo']
rm = | module.params['rm']
creates = module.params['creates']
if len(creates) > 0:
uncreated = [x for x in creates if not glob.glob(os.path.expanduser(x))]
if len(uncreated) == 0:
module.exit_json(
cmd=command,
stdout="skipped, since %s exi | st" % creates,
changed=False,
stderr=False,
rc=0
)
cmd = []
if secure:
cmd.append("sudo dockercmd run")
else:
cmd.append(("sudo " if sudo else "") + "docker run")
if rm:
cmd.append("--rm")
if working_dir is not None:
cmd.append("-w " + working_dir)
cmd += ["-e " + x for x in environment]
cmd += ["-v " + x for x in volumes]
cmd += [image, command]
proc = subprocess.Popen(
["/bin/bash", "-c", " ".join(cmd)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
lines = []
for line in iter(proc.stdout.readline, b''):
lines.append(line)
proc.stdout.close()
result = dict(stdout="".join(lines),
stdout_lines=lines)
module.exit_json(**result)
# ===========================================
# import module snippets
from ansible.module_utils.basic import *
main()
|
openstack/python-openstacksdk | openstack/tests/unit/identity/v3/test_application_credential.py | Python | apache-2.0 | 2,214 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# | under the License.
from openstack.tests.unit import base
from openstack.identity.v3 import application_credential
EXAMPLE = {
"user": {
"id": "8ac43bb0926245cead88676a96c750d3"},
"name": 'monitoring',
"secret": 'rEaqvJka48mpv',
"roles": [
{"name": "Reader"}
],
"expires_at": '2018-02-27T18:30:59Z',
"description": "Application credential for monitoring",
"unrestricted": "False",
"project_id": "3",
"links": {"self": "http://example.com/v3/application_credential_1"}
}
class TestApplicationCredential(base.TestCase):
def test_basic(self):
sot = application_credential.ApplicationCredential()
self.assertEqual('application_credential', sot.resource_key)
self.assertEqual('application_credentials', sot.resources_key)
self.assertEqual('/users/%(user_id)s/application_credentials',
sot.base_path)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertTrue(sot.allow_commit)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = application_credential.ApplicationCredential(**EXAMPLE)
self.assertEqual(EXAMPLE['user'], sot.user)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['secret'], sot.secret)
self.assertEqual(EXAMPLE['description'], sot.description)
self.assertEqual(EXAMPLE['expires_at'], sot.expires_at)
self.assertEqual(EXAMPLE['project_id'], sot.project_id)
self.assertEqual(EXAMPLE['roles'], sot.roles)
self.assertEqual(EXAMPLE['links'], sot.links)
|
Kniyl/mezzanine | mezzanine/twitter/models.py | Python | bsd-2-clause | 6,647 | 0.00015 | from __future__ import unicode_literals
from future.builtins import str
from datetime import datetime
import re
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import urlize
from django.utils.timezone import make_aware, utc
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \
QUERY_TYPE_LIST, QUERY_TYPE_SEARCH
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"
replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>"
class TwitterQueryException(Exception):
pass
@python_2_unicode_compatible
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES,
max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
urls = {
QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")),
QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
settings.use_editable()
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json['user']
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json['user']['screen_name']
tweet.full_name = tweet_json['user']['name']
tweet.profile_image_url = \
tweet_json['user']["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = ''.join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
tweet.created_at = make_aware(d, utc)
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
class Tweet(models.Model):
remote_id = models.CharFi | eld(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), ma | x_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True)
query = models.ForeignKey("Query", related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __str__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
|
Lamzin/myPython | Algebra/matrix_extreme.py | Python | gpl-2.0 | 8,779 | 0.003303 | import random
import copy
from fractions import Fraction
def get_matrix(file_name="matrix_new.txt"):
file = open(file_name, "r")
A = []
for line in file:
A.append([Fraction(x) for x in line.split()])
return A
def print_file(A, comment = ""):
file = open("matrix_show.txt", "a")
file.write("%s\n" % comment)
for i in range(len(A)):
for j in range(len(A[i])):
file.write("%6s " % Fraction.limit_denominator(A[i][j]))
file.write("\n")
file.write("\n\n")
def transform_matrix(A):
n = len(A)
a = [[A[i][j] for j in range(n)] for i in range(n)]
cnt = 10
file_report = open("transform_report_new.txt", "w")
for i in range(n):
for count in range(cnt):
j = random.randint(0, n - 1)
if i == j:
break
kk = Fraction(random.randint(-1, 1))
if not kk == 0:
for g in range(n):
a[i][g] += kk * a[j][g]
a[g][j] -= kk * a[g][i]
file_report.write("(line: ) %i += (%i)%i\n" % (i, kk, j))
file_report.write("(column: ) %i -= (%i)%i\n" % (j, kk, i))
return a
def matrix_product(A, B):
n = len(A)
C = []
for i in range(n):
C.append([])
for j in range(n):
C[i].append(Fraction(0, 1))
for g in range(n):
C[i][j] += A[i][g] * B[g][j]
return C
def matrix_plus_k_elementary(A, k):
ans = [[A[i][j] for j in range(len(A))] for i in range(len(A))]
for i in range(len(A)):
ans[i][i] += k
return ans
def matrix_fundamental_system_of_solutions(A):
n = len(A)
a = [[A[i][j] for j in range(n)] for i in range(n)]
used = [[False, -1] for x in range(n)]
# print(a)
ii = 0
for variable in range(n):
index = -1
for i in range(ii, n):
if a[i][variable].numerator != 0:
index = i
break
if index != -1:
used[variable] = [True, ii]
a[ii], a[index] = a[index], a[ii]
k = a[ii][variable]
for g in range(n):
a[ii][g] /= k
for i in range(n):
if i == ii:
continue
if a[i][variable].numerator != 0:
kk = a[i][variable]
for j in range(n):
a[i][j] -= kk * a[ii][j]
ii += 1
ans = []
for i in range(n):
if not used[i][0]:
ans.append([])
for j in range(n):
if i == j:
ans[-1].append(Fraction(1, 1))
else:
ans[-1].append(-a[used[j][1]][i] if used[j][0] else Fraction(0, 1))
pass
return ans
def matrix_addition_to_base():
file = open("addition.txt", "r")
n, m = [int(x) for x in file.readline().split()]
a = [[Fraction(x) for x in line.split()] for line in file]
a_copy = copy.deepcopy(a)
dimV = len(a[0])
used = [False for x in range(len(a))]
for variable in range(dimV):
index = -1
for i in range(len(a)):
if a[i][variable].numerator != 0 and not used[i]:
index = i
break
if index != -1:
used[index] = True
k = a[index][variable]
for g in range(dimV):
a[index][g] /= k
pass
for ii in range(len(a)):
if index == ii:
continue
if a[ii][variable].numerator != 0:
kk = a[ii][variable]
for g in range(dimV):
a[ii][g] -= kk * a[index][g]
file.close()
file = open("addition_answer.txt", "w")
ans = []
file.write("Addition:\n")
for i in range(n, len(used)):
if used[i]:
ans.append(a_copy[i])
file.write(" ".join([str(item) for item in a_copy[i]]))
file.write("\n")
return ans
def matrix_vector_product(A, v):
n = len(A)
C = []
for i in range(n):
C.append(Fraction(0, 1))
for g in range(n):
C[i] += A[i][g] * v[g]
return C
def vector_series(A, alpha):
N = matrix_plus_k_elementary(A, Fraction(-alpha, 1))
addition_to_base = matrix_addition_to_base()
ans = []
for v in addition_to_base:
ans.append([])
curv = copy.copy(v)
for i in range(50):
ans[-1].append(curv)
curv = matrix_vector_product(N, curv)
not_null = False
for it in curv:
if it != 0:
not_null = True
if not not_null:
break
return ans
def matrix_transposed(A):
    """Return the transpose of square matrix A as a list of lists."""
    return [list(column) for column in zip(*A)]
def matrix_invers(A):
    """Return the inverse of square matrix A via Gauss-Jordan elimination.

    A is not modified.  The matrix is augmented with the identity,
    [A | I], reduced to [I | A^-1], and the right block is returned.
    Entries are expected to be Fractions so arithmetic is exact.
    """
    n = len(A)
    aug = copy.deepcopy(A)
    # Append an identity block on the right of every row: [A | I].
    for i in range(n):
        aug[i].extend(Fraction(0) for _ in range(n))
        aug[i][i + n] = Fraction(1)
    used = [False] * n
    for col in range(n):
        # Locate a pivot row for this column among the unused rows.
        pivot = -1
        for i in range(n):
            if aug[i][col].numerator != 0 and not used[i]:
                pivot = i
                break
        if pivot == -1:
            continue
        used[col] = True
        aug[col], aug[pivot] = aug[pivot], aug[col]
        # Normalize the pivot row, then clear the column everywhere else.
        factor = aug[col][col]
        for g in range(2 * n):
            aug[col][g] /= factor
        for i in range(n):
            if i == col or aug[i][col].numerator == 0:
                continue
            factor = aug[i][col]
            for g in range(2 * n):
                aug[i][g] -= factor * aug[col][g]
    return [row[n:] for row in aug]
def check_jordan_base():
    """Verify a candidate Jordan basis by printing S, S^-1 and S^-1 A S.

    A is loaded from 'matrix_input.txt'; the basis vectors come from the
    default get_matrix() source and are transposed into the columns of S.
    If the basis is correct, the printed product is the Jordan form.
    """
    A = get_matrix("matrix_input.txt")
    S = matrix_transposed(get_matrix())
    print_file(S)
    S_inverse = matrix_invers(S)
    print_file(S_inverse)
    print_file(matrix_product(matrix_product(S_inverse, A), S))
def print_in_file(a, file):
    """Write the items of *a* to *file* as one line of %6s-padded fields."""
    file.write("".join("%6s " % item for item in a))
    file.write("\n")
if __name__ == "__main__":
ff = open("matrix_show.txt", "w")
ff.close()
alpha = [2, 4, -5] # eigenvalues numbers
a = get_matrix()
A = transform_matrix(a)
print_file(a, "Initial matrix")
print_file(A, "Transformed matrix")
for alpha_current in alpha:
print_file([], "alpha = %i" % alpha_current)
Ae0 = matrix_plus_k_elementary(A, Fraction(-alpha_current, 1))
Ae = Ae0
print_file(Ae0, "ker N0")
fund_syst_array = []
for cnt in range(50):
fundamental_system = matrix_fundamental_system_of_solutions(Ae)
if cnt == 0 or cnt > 0 and not len(fund_syst_array[-1]) == len(fundamental_system):
fund_syst_array.append(fundamental_system)
print_file(fundamental_system, "fundamental_system Ker N^%i" % (cnt + 1))
Ae = matrix_product(Ae, Ae0)
else:
break;
chain = []
for i in range(len(fund_syst_array) - 1, 0, -1):
#print("i = %i" % i)
tmp = []
for item_chain in chain:
for item in item_chain:
tmp.append(item[-i-1])
file_addition = open("addition.txt", "w")
file_addition.write("%i %i\n" % (len(fund_syst_array[i - 1]) + len(tmp), len(fund_syst_array[i])))
for item in fund_syst_array[i - 1]:
print_in_fi | le(item, file_addition)
for item in tmp:
print_in_file(item, file_addition)
for item in fund_syst_array[i]:
print_in_file(item, file_addition)
file_addition.close()
chain.append(vector_ser | ies(A, alpha_current))
print_file([], "Chain #")
for it in chain[-1]:
print_file(it)
first_vector = []
for chain_vect in chain:
for chain_chain in chain |
ade25/wigo | src/wigo.statusapp/wigo/statusapp/setuphandlers.py | Python | mit | 802 | 0.001247 | from plone import api
from plone.app.controlpanel.security import ISecuritySchema
def setup_workspaces(portal):
    """Configure member folders to use the custom workspace type.

    NOTE(review): the *portal* parameter is currently unused; the
    membership tool is fetched through the plone.api helper instead.
    Fixes a corrupted token (``name | =``) introduced by extraction.
    """
    mp = api.portal.get_tool(name='portal_membership')
    # Member areas are created as wigo workspace objects rather than
    # the stock Folder type.
    mp.setMemberAreaType('wigo.workspaces.workspace')
    # Member folders live under the 'sqa' container.
    mp.setMembersFolderById('sqa')
def setup_security(portal):
    """ Add security controlpanel settings.

    Enables per-user member folders and UUID-based user ids on the
    site's security configuration.

    NOTE(review): the *portal* parameter is unused; the site is fetched
    via api.portal.get() instead -- confirm whether they can differ.
    """
    site = api.portal.get()
    # Adapt the site to the security schema to reach the settings.
    security = ISecuritySchema(site)
    security.set_enable_user_folders(True)
    security.use_uuid_as_userid(True)
def setupVarious(context):
    """GenericSetup import step for the wigo.statusapp profile."""
    # The marker file guards this handler: import steps fire for every
    # profile, so bail out unless our own profile is being applied.
    if context.readDataFile('wigo.statusapp-various.txt') is None:
        return
    portal = api.portal.get()
    setup_workspaces(portal)
    # Apply the security control panel settings.
    setup_security(portal)
nicememory/pie | pyglet/pyglet/media/drivers/directsound/adaptation.py | Python | apache-2.0 | 17,118 | 0.001402 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import ctypes
import math
import threading
from . import interface
from pyglet.debug import debug_print
from pyglet.media.events import MediaEvent
from pyglet.media.drivers.base import AbstractAudioDriver, AbstractAudioPlayer
from pyglet.media.listener import AbstractListener
from pyglet.media.threads import PlayerWorker
_debug = debug_print('debug_media')
def _convert_coordinates(coordinates):
x, y, z = coordinates
return (x, y, -z)
def _gain2db(gain):
"""
Convert linear gain in range [0.0, 1.0] to 100ths of dB.
Power gain = P1/P2
dB = 10 log(P1/P2)
dB * 100 = 1000 * log(power gain)
"""
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log10(min(gain, 1))), 0))
def _db2gain(db):
"""Convert 100ths of dB to linear gain."""
return math.pow(10.0, float(db)/1000.0)
class DirectSoundAudioPlayer(AbstractAudioPlayer):
# Need to cache these because pyglet API allows update separately, but
# DSound requires both to be set at once.
_cone_inner_angle = 360
_cone_outer_angle = 360
min_buffer_size = 9600
    def __init__(self, driver, ds_driver, source_group, player):
        """Create a DirectSound buffer for source_group and prefill it.

        driver/ds_driver supply the DirectSound interface; source_group
        provides the audio data and format; player receives events.
        """
        super(DirectSoundAudioPlayer, self).__init__(source_group, player)
        self.driver = driver
        self._ds_driver = ds_driver
        # Locking strategy:
        # All DirectSound calls should be locked. All instance vars relating
        # to buffering/filling/time/events should be locked (used by both
        # application and worker thread). Other instance vars (consts and
        # 3d vars) do not need to be locked.
        self._lock = threading.RLock()
        # Desired play state (may be actually paused due to underrun -- not
        # implemented yet).
        self._playing = False
        # Up to one audio data may be buffered if too much data was received
        # from the source that could not be written immediately into the
        # buffer. See refill().
        self._audiodata_buffer = None
        # Theoretical write and play cursors for an infinite buffer. play
        # cursor is always <= write cursor (when equal, underrun is
        # happening).
        self._write_cursor = 0
        self._play_cursor = 0
        # Cursor position of end of data. Silence is written after
        # eos for one buffer size.
        self._eos_cursor = None
        # Indexes into DSound circular buffer. Complications ensue wrt each
        # other to avoid writing over the play cursor. See get_write_size and
        # write().
        self._play_cursor_ring = 0
        self._write_cursor_ring = 0
        # List of (play_cursor, MediaEvent), in sort order
        self._events = []
        # List of (cursor, timestamp), in sort order (cursor gives expiry
        # place of the timestamp)
        self._timestamps = []
        audio_format = source_group.audio_format
        # DSound buffer; its size drives how much data refill() requests.
        self._ds_buffer = self._ds_driver.create_buffer(audio_format)
        self._buffer_size = self._ds_buffer.buffer_size
        self._ds_buffer.current_position = 0
        # Prefill the whole ring buffer so playback can start immediately.
        self.refill(self._buffer_size)
    def __del__(self):
        # Best-effort cleanup; never let an exception escape a finalizer.
        try:
            self.delete()
        except:
            pass
| def delete(self):
if self.driver and self.driver.worker:
self.driver.worker.remove(self)
with self._lock:
self._ds_buffer = None
def play(self | ):
assert _debug('DirectSound play')
self.driver.worker.add(self)
with self._lock:
if not self._playing:
self._get_audiodata() # prebuffer if needed
self._playing = True
self._ds_buffer.play()
assert _debug('return DirectSound play')
    def stop(self):
        """Pause playback, leaving buffer contents and cursors intact."""
        assert _debug('DirectSound stop')
        # NOTE(review): unlike play(), this does not remove the player from
        # the driver's worker -- delete() does that.
        with self._lock:
            if self._playing:
                self._playing = False
                self._ds_buffer.stop()
        assert _debug('return DirectSound stop')
    def clear(self):
        """Discard buffered audio plus all pending events and timestamps."""
        assert _debug('DirectSound clear')
        with self._lock:
            # Rewind the hardware buffer and both ring indexes.
            self._ds_buffer.current_position = 0
            self._play_cursor_ring = self._write_cursor_ring = 0
            # Align the logical play cursor with the write cursor: nothing
            # is considered pending after a clear.
            self._play_cursor = self._write_cursor
            self._eos_cursor = None
            self._audiodata_buffer = None
            del self._events[:]
            del self._timestamps[:]
    def refill(self, write_size):
        """Write up to write_size bytes of audio into the ring buffer.

        Source data is written while available; once the source is
        exhausted the remainder is padded with silence.
        """
        with self._lock:
            while write_size > 0:
                assert _debug('refill, write_size =', write_size)
                audio_data = self._get_audiodata()
                if audio_data is not None:
                    assert _debug('write', audio_data.length)
                    # Never write more than requested; leftover audio stays
                    # in self._audiodata_buffer for the next refill.
                    length = min(write_size, audio_data.length)
                    self.write(audio_data, length)
                    write_size -= length
                else:
                    assert _debug('write silence')
                    # Source exhausted: pad the rest with silence.
                    self.write(None, write_size)
                    write_size = 0
def _has_underrun(self):
return (self._eos_cursor is not None
and self._play_cursor > self._eos_cursor)
    def _dispatch_new_event(self, event_name):
        # Create a zero-timestamp media event and hand it to the player
        # for synchronous dispatch.
        MediaEvent(0, event_name)._sync_dispatch_to_player(self.player)
def _get_audiodata(self):
if self._audiodata_buffer is None or self._audiodata_buffer.length == 0:
self._get_new_audiodata()
return self._audiodata_buffer
def _get_new_audiodata(self):
assert _debug('Getting new audio data buffer.')
self._audiodata_buffer = self.source_group.get_audio_data(self._buffer_size)
if self._audiodata_buffer is not None:
assert _debug('New audio data available: {} bytes'.format(self._audiodata_buffer.length))
if self._eos_cursor is not None:
self._move_write_cursor_after_eos()
self._add_audiodata_events(self._audiodata_buffer)
self._add_audiodata_timestamp(self._audiodata_buffer)
self._eos_cursor = None
elif self._eos_cursor is None:
assert _debug('No more audio data.')
self._eos_cursor |
fbradyirl/home-assistant | homeassistant/util/aiohttp.py | Python | apache-2.0 | 1,432 | 0 | """Utilities to help with aiohttp."""
import json
from urllib.parse import parse_qsl
from typing import Any, Dict, Optional
from multidict import CIMultiDict, MultiDict
class MockRequest:
    """Mock an aiohttp request.

    Mimics the parts of aiohttp's Request API that Home Assistant
    components touch: headers, query, text/json/post body accessors.
    Fixes lines corrupted by extraction (stray ``|`` separators); the
    behaviour is unchanged.
    """

    def __init__(
        self,
        content: bytes,
        method: str = "GET",
        status: int = 200,
        headers: Optional[Dict[str, str]] = None,
        query_string: Optional[str] = None,
        url: str = "",
    ) -> None:
        """Initialize a request."""
        self.method = method
        self.url = url
        self.status = status
        # aiohttp exposes headers as a case-insensitive multidict.
        self.headers = CIMultiDict(headers or {})  # type: CIMultiDict[str]
        self.query_string = query_string or ""
        self._content = content

    @property
    def query(self) -> "MultiDict[str]":
        """Return a dictionary with the query variables."""
        return MultiDict(parse_qsl(self.query_string, keep_blank_values=True))

    @property
    def _text(self) -> str:
        """Return the body as text."""
        return self._content.decode("utf-8")

    async def json(self) -> Any:
        """Return the body as JSON."""
        return json.loads(self._text)

    async def post(self) -> "MultiDict[str]":
        """Return POST parameters parsed from the form-encoded body."""
        return MultiDict(parse_qsl(self._text, keep_blank_values=True))

    async def text(self) -> str:
        """Return the body as text."""
        return self._text
|
leaen/Codeeval-solutions | mth-to-last-element.py | Python | mit | 325 | 0.006154 | import sys
def main():
    """Print the m-th element from the end of each line of the input file.

    Each line of the file named by sys.argv[1] contains space-separated
    tokens followed by an integer m; the m-th token from the end of the
    data (not counting m itself) is printed.

    Fixes a corrupted line (stray ``|`` from extraction) and simplifies
    the reversed-list indexing into a single negative index.
    """
    with open(sys.argv[1]) as input_file:
        for line in input_file:
            tokens = line.split()
            m = int(tokens[-1])
            # tokens[-1] is m itself, so the m-th data element from the
            # end sits at offset -(m + 1).
            print(tokens[-1 - m])
main()
|
ubuntu-core/snapcraft | tests/unit/sources/test_bazaar.py | Python | gpl-3.0 | 7,406 | 0.00054 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
from unittest import mock
import fixtures
from testtools.matchers import Equals
from snapcraft.internal import sources
from tests import unit
# LP: #1733584
class TestBazaar(unit.sources.SourceTestCase): # type: ignore
    """Unit tests for the Bazaar source handler's pull/branch behaviour."""
    def setUp(self):
        super().setUp()
        # Mock _get_source_details() since not all tests have a
        # full repo checkout
        patcher = mock.patch("snapcraft.sources.Bazaar._get_source_details")
        self.mock_get_source_details = patcher.start()
        self.mock_get_source_details.return_value = ""
        self.addCleanup(patcher.stop)
    def test_pull(self):
        # A fresh pull removes the target dir and branches into it.
        bzr = sources.Bazaar("lp:my-source", "source_dir")
        bzr.pull()
        self.mock_rmdir.assert_called_once_with("source_dir")
        self.mock_run.assert_called_once_with(
            ["bzr", "branch", "lp:my-source", "source_dir"]
        )
    def test_pull_tag(self):
        # A tag is passed through as a '-r tag:<name>' revision spec.
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_tag="tag")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "branch", "-r", "tag:tag", "lp:my-source", "source_dir"]
        )
    def test_pull_existing_with_tag(self):
        # If the target dir already exists, 'bzr pull' is used instead of
        # 'bzr branch'.
        self.mock_path_exists.return_value = True
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_tag="tag")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "pull", "-r", "tag:tag", "lp:my-source", "-d", "source_dir"]
        )
    def test_pull_commit(self):
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_commit="2")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "branch", "-r", "2", "lp:my-source", "source_dir"]
        )
    def test_pull_existing_with_commit(self):
        self.mock_path_exists.return_value = True
        bzr = sources.Bazaar("lp:my-source", "source_dir", source_commit="2")
        bzr.pull()
        self.mock_run.assert_called_once_with(
            ["bzr", "pull", "-r", "2", "lp:my-source", "-d", "source_dir"]
        )
    def test_init_with_source_branch_raises_exception(self):
        # bzr has no branch concept separate from the URL, so the option
        # must be rejected at construction time.
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceInvalidOptionError,
            sources.Bazaar,
            "lp:mysource",
            "source_dir",
            source_branch="branch",
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.option, Equals("source-branch"))
    def test_init_with_source_depth_raises_exception(self):
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceInvalidOptionError,
            sources.Bazaar,
            "lp://mysource",
            "source_dir",
            source_depth=2,
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.option, Equals("source-depth"))
    def test_init_with_source_tag_and_commit_raises_exception(self):
        # Tag and commit are mutually exclusive revision selectors.
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceIncompatibleOptionsError,
            sources.Bazaar,
            "lp://mysource",
            "source_dir",
            source_tag="tag",
            source_commit="2",
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.options, Equals(["source-tag", "source-commit"]))
    def test_source_checksum_raises_exception(self):
        raised = self.assertRaises(
            sources.errors.SnapcraftSourceInvalidOptionError,
            sources.Bazaar,
            "lp://mysource",
            "source_dir",
            source_checksum="md5/d9210476aac5f367b14e513bdefdee08",
        )
        self.assertThat(raised.source_type, Equals("bzr"))
        self.assertThat(raised.option, Equals("source-checksum"))
    def test_has_source_handler_entry(self):
        # The 'bzr' source type must be registered in the handler table.
        self.assertTrue(sources._source_handler["bzr"] is sources.Bazaar)
    def test_pull_failure(self):
        # A failing bzr invocation surfaces as SnapcraftPullError with the
        # full command line and exit code.
        self.mock_run.side_effect = subprocess.CalledProcessError(1, [])
        bzr = sources.Bazaar("lp:my-source", "source_dir")
        raised = self.assertRaises(sources.errors.SnapcraftPullError, bzr.pull)
        self.assertThat(raised.command, Equals("bzr branch lp:my-source source_dir"))
        self.assertThat(raised.exit_code, Equals(1))
def get_side_effect(original_call):
    """Build a subprocess side-effect that fakes bzr invocations.

    Any command whose second token is 'revno' yields a canned commit id;
    any other command starting with 'bzr' is swallowed (returns None);
    everything else falls through to *original_call*.
    """
    def side_effect(cmd, *args, **kwargs):
        if len(cmd) > 1 and cmd[1] == "revno":
            return "mock-commit".encode()
        if cmd[0] == "bzr":
            return None
        return original_call(cmd, *args, **kwargs)

    return side_effect
class BazaarDetailsTestCase(unit.TestCase):
    """Tests for source-detail reporting against a fake bzr checkout.

    Fixes lines corrupted by extraction (stray ``|`` separators); the
    assertions are unchanged.
    """

    def setUp(self):
        super().setUp()
        self.working_tree = "bzr-working-tree"
        self.source_dir = "bzr-source-dir"
        os.mkdir(self.source_dir)
        # Simulate that we have already branched code out.
        os.mkdir(os.path.join(self.source_dir, ".bzr"))

        # Patch subprocess so bzr commands are faked while all other
        # commands still run for real.
        self.fake_check_output = self.useFixture(
            fixtures.MockPatch(
                "subprocess.check_output",
                side_effect=get_side_effect(subprocess.check_output),
            )
        )
        self.fake_check_call = self.useFixture(
            fixtures.MockPatch(
                "subprocess.check_call",
                side_effect=get_side_effect(subprocess.check_call),
            )
        )

    def test_bzr_details_commit(self):
        """Without a tag, the commit is read via 'bzr revno'."""
        bzr = sources.Bazaar(self.working_tree, self.source_dir, silent=True)
        bzr.pull()

        source_details = bzr._get_source_details()
        self.assertThat(source_details["source-commit"], Equals("mock-commit"))

        self.fake_check_output.mock.assert_has_calls(
            [
                mock.call(["bzr", "revno", self.source_dir]),
                mock.call(["bzr", "revno", self.source_dir]),
            ]
        )
        # stderr/stdout of -3 correspond to subprocess.DEVNULL (silent=True).
        self.fake_check_call.mock.assert_called_once_with(
            ["bzr", "pull", self.working_tree, "-d", self.source_dir],
            stderr=-3,
            stdout=-3,
        )

    def test_bzr_details_tag(self):
        """With a tag, no revno lookup happens; the tag is reported."""
        bzr = sources.Bazaar(
            self.working_tree, self.source_dir, source_tag="mock-tag", silent=True
        )
        bzr.pull()

        source_details = bzr._get_source_details()
        self.assertThat(source_details["source-tag"], Equals("mock-tag"))

        self.fake_check_output.mock.assert_not_called()
        self.fake_check_call.mock.assert_called_once_with(
            [
                "bzr",
                "pull",
                "-r",
                "tag:mock-tag",
                self.working_tree,
                "-d",
                self.source_dir,
            ],
            stderr=-3,
            stdout=-3,
        )
sampathweb/cs109_twitterapp | app/blueprints/__init__.py | Python | mit | 48 | 0 | from | main.views imp | ort main
__all__ = ['main']
|
thaumos/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py | Python | gpl-3.0 | 97,131 | 0.00384 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine
version_added: "2.1"
short_description: Manage Azure virtual machines.
description:
- Create, update, stop and start a virtual machine. Provide an existing storage account and network interface or
allow the module to create these for you. If you choose not to provide a network interface, the resource group
must contain a virtual network with at least one subnet.
- Before Ansible 2.5, this required an image found in the Azure Marketplace which can be discovered with
M(azure_rm_virtualmachineimage_facts). In Ansible 2.5 and newer, custom images can be used as well, see the
examples for more details.
- If you need to use the I(custom_data) option, many images in the marketplace are not cloud-init ready. Thus, data
sent to I(custom_data) would be ignored. If the image you are attempting to use is not listed in
U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview),
follow these steps U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image).
options:
resource_group:
description:
- Name of the resource group containing the virtual machine.
required: true
name:
description:
- Name of the virtual machine.
required: true
custom_data:
description:
- Data which is made available to the virtual machine and used by e.g., cloud-init.
version_added: "2.5"
state:
description:
- Assert the state of the virtual machine.
- State C(present) will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power
state.
- State C(absent) will remove the virtual machine.
default: present
choices:
- absent
- present
started:
description:
- Use with state C(present) to start the machine. Set to false to have the machine be 'stopped'.
default: true
type: bool
allocated:
description:
- Toggle that controls if the machine is allocated/deallocated, only useful with state='present'.
default: True
type: bool
generalized:
description:
- Use with state C(present) to generalize the machine. Set to true to generalize the machine.
- Please note that this operation is irreversible.
type: bool
version_added: "2.8"
restarted:
description:
- Use with state C(present) to restart a running VM.
type: bool
location:
description:
- Valid Azure location. Defaults to location of the resource group.
short_hostname:
description:
- Name assigned internally to the host. On a linux VM this is the name returned by the `hostname` command.
When creating a virtual machine, short_hostname defaults to name.
vm_size:
description:
- A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the
subscription and location. Check your subscription for available choices. Required when creating a VM.
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
admin_password:
description:
- Password for the admin username. Not required if the os_type is Linux and SSH password authentication
is | disabled by setting ssh_password_enabled to false.
ssh_password_enabled:
description:
- When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication
and require use of SSH keys.
default: true
type: bool
ssh_public_keys:
description:
- "For os_type Linux provide a list of SSH keys. Each item in the list sho | uld be a dictionary where the
dictionary contains two keys: path and key_data. Set the path to the default location of the
authorized_keys files. On an Enterprise Linux host, for example, the path will be
/home/<admin username>/.ssh/authorized_keys. Set key_data to the actual value of the public key."
image:
description:
- Specifies the image used to build the VM.
- If a string, the image is sourced from a custom image based on the
name.
- 'If a dict with the keys C(publisher), C(offer), C(sku), and
C(version), the image is sourced from a Marketplace image. NOTE:
set image.version to C(latest) to get the most recent version of a
given image.'
- 'If a dict with the keys C(name) and C(resource_group), the image
is sourced from a custom image based on the C(name) and
C(resource_group) set. NOTE: the key C(resource_group) is optional
and if omitted, all images in the subscription will be searched
for by C(name).'
- Custom image support was added in Ansible 2.5
required: true
availability_set:
description:
- Name or ID of an existing availability set to add the VM to. The availability_set should be in the same resource group as VM.
version_added: "2.5"
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM,
a new storage account named <vm name>01 will be created using storage type 'Standard_LRS'.
aliases:
- storage_account
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs. If no name is specified a
default container will created.
default: vhds
aliases:
- storage_container
storage_blob_name:
description:
- Name of the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to
the VM name + '.vhd'. If you provide a name, it must end with '.vhd'
aliases:
- storage_blob
managed_disk_type:
description:
- Managed OS disk type
choices:
- Standard_LRS
- StandardSSD_LRS
- Premium_LRS
version_added: "2.4"
os_disk_name:
description:
- OS disk name
version_added: "2.8"
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
os_disk_size_gb:
description:
- Type of OS disk size in GB.
version_added: "2.7"
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default: Linux
data_disks:
description:
- Describes list of data disks.
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk
default: 0
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for |
ddsc/ddsc-core | ddsc_core/migrations/0006_add_model_MeasuringMethod.py | Python | mit | 6,529 | 0.007352 | # -*- cod | ing: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the MeasuringMethod model.

    Auto-generated schema migration; only a corrupted line (stray ``|``
    from extraction) was repaired and docstrings added -- the schema
    itself is untouched.  Note 'titel' is the actual column name used by
    the model and must not be "corrected".
    """

    def forwards(self, orm):
        """Create the ddsc_core_measuringmethod table."""
        # Adding model 'MeasuringMethod'
        db.create_table(u'ddsc_core_measuringmethod', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=12)),
            ('description', self.gf('django.db.models.fields.CharField')(unique=True, max_length=60)),
            ('begin_date', self.gf('django.db.models.fields.DateField')()),
            ('end_date', self.gf('django.db.models.fields.DateField')()),
            ('group', self.gf('django.db.models.fields.CharField')(max_length=60, null=True)),
            ('titel', self.gf('django.db.models.fields.CharField')(max_length=600, null=True)),
        ))
        db.send_create_signal(u'ddsc_core', ['MeasuringMethod'])

    def backwards(self, orm):
        """Drop the ddsc_core_measuringmethod table."""
        # Deleting model 'MeasuringMethod'
        db.delete_table(u'ddsc_core_measuringmethod')

    models = {
        u'ddsc_core.compartment': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
        },
        u'ddsc_core.measuringdevice': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'ddsc_core.measuringmethod': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'})
        },
        u'ddsc_core.parameter': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'})
        },
        u'ddsc_core.processingmethod': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'ddsc_core.referenceframe': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'ddsc_core.unit': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['ddsc_core']
ToontownUprising/src | toontown/chat/WhisperPopup.py | Python | mit | 11,313 | 0.000088 | from panda3d.core import TextNode, PGButton, Point3
from toontown.chat import ChatGlobals
from toontown.chat.ChatBalloon import ChatBalloon
from toontown.margins import MarginGlobals
from toontown.margins.MarginVisible import MarginVisible
fr | om to | ontown.nametag import NametagGlobals
from toontown.toontowngui.Clickable2d import Clickable2d
class WhisperQuitButton(Clickable2d):
    """Clickable close ('X') button attached to a WhisperPopup."""
    CONTENTS_SCALE = 12
    def __init__(self, whisperPopup):
        Clickable2d.__init__(self, 'WhisperQuitButton')
        # The popup this button dismisses; also consulted for hover state.
        self.whisperPopup = whisperPopup
        self.contents.setScale(self.CONTENTS_SCALE)
        # Hidden until the popup (or the button itself) is hovered.
        self.contents.hide()
        # NodePath holding the current button art for the click state.
        self.nodePath = None
        self.update()
    def destroy(self):
        """Tear down event hooks and release the button geometry."""
        self.ignoreAll()
        if self.nodePath is not None:
            self.nodePath.removeNode()
            self.nodePath = None
        Clickable2d.destroy(self)
    def getUniqueName(self):
        """Return a per-instance name for events and tasks."""
        return 'WhisperQuitButton-' + str(id(self))
    def update(self):
        """Rebuild the button geometry for the current click state."""
        if self.nodePath is not None:
            self.nodePath.removeNode()
            self.nodePath = None
        self.contents.node().removeAllChildren()
        quitButtonNode = NametagGlobals.quitButton[self.clickState]
        self.nodePath = quitButtonNode.copyTo(self.contents)
    def applyClickState(self, clickState):
        """Swap in the button art matching clickState without rebuilding children."""
        if self.nodePath is not None:
            self.nodePath.removeNode()
            self.nodePath = None
        quitButtonNode = NametagGlobals.quitButton[clickState]
        self.nodePath = quitButtonNode.copyTo(self.contents)
    def setClickState(self, clickState):
        self.applyClickState(clickState)
        # Only show the button while it or its popup is hovered, or while
        # the button is held down.
        if self.isHovering() or self.whisperPopup.isHovering():
            self.contents.show()
        elif self.clickState == PGButton.SDepressed:
            self.contents.show()
        else:
            self.contents.hide()
        Clickable2d.setClickState(self, clickState)
    def enterDepressed(self):
        base.playSfx(NametagGlobals.clickSound)
    def enterRollover(self):
        # Skip the rollover sound right after a click release.
        if self.lastClickState != PGButton.SDepressed:
            base.playSfx(NametagGlobals.rolloverSound)
    def updateClickRegion(self):
        """Size the clickable region to the quit-button art, centered on origin."""
        if self.nodePath is not None:
            right = NametagGlobals.quitButtonWidth / 2.0
            left = -right
            top = NametagGlobals.quitButtonHeight / 2.0
            bottom = -top
            self.setClickRegionFrame(left, right, bottom, top)
class WhisperPopup(Clickable2d, MarginVisible):
    """A clickable 2-D whisper chat balloon shown in the margin display."""
    # Scale applied to the whole contents node.
    CONTENTS_SCALE = 0.25
    # NOTE(review): TEXT_MAX_ROWS is not referenced in the visible code —
    # confirm whether callers or trimmed code use it.
    TEXT_MAX_ROWS = 6
    # Word-wrap width passed to the balloon's TextNode.
    TEXT_WORD_WRAP = 8
    # Offset applied to the quit button relative to the text's top right.
    QUIT_BUTTON_SHIFT = (0.42, 0, 0.42)
    # Clamp bounds (seconds) for the auto-computed display timeout.
    WHISPER_TIMEOUT_MIN = 10
    WHISPER_TIMEOUT_MAX = 20
    def __init__(self, text, font, whisperType, timeout=None):
        """Build a whisper popup.

        text -- the whisper message to display.
        font -- font object applied to the TextNode.
        whisperType -- key into ChatGlobals.WhisperColors selecting the
            foreground/background color scheme per click state.
        timeout -- seconds before the popup removes itself; if None, it is
            derived from the message length (0.33 s per character) and
            clamped to [WHISPER_TIMEOUT_MIN, WHISPER_TIMEOUT_MAX].
        """
        Clickable2d.__init__(self, 'WhisperPopup')
        MarginVisible.__init__(self)
        self.text = text
        self.font = font
        self.whisperType = whisperType
        if timeout is None:
            # Auto-timeout scales with message length, clamped to the bounds.
            self.timeout = len(text) * 0.33
            if self.timeout < self.WHISPER_TIMEOUT_MIN:
                self.timeout = self.WHISPER_TIMEOUT_MIN
            elif self.timeout > self.WHISPER_TIMEOUT_MAX:
                self.timeout = self.WHISPER_TIMEOUT_MAX
        else:
            self.timeout = timeout
        self.active = False
        # Sender info is filled in later by setClickable().
        self.senderName = ''
        self.fromId = 0
        self.isPlayer = 0
        self.contents.setScale(self.CONTENTS_SCALE)
        self.whisperColor = ChatGlobals.WhisperColors[self.whisperType]
        self.textNode = TextNode('text')
        self.textNode.setWordwrap(self.TEXT_WORD_WRAP)
        # [0] is the foreground color of the inactive state.
        self.textNode.setTextColor(self.whisperColor[PGButton.SInactive][0])
        self.textNode.setFont(self.font)
        self.textNode.setText(self.text)
        self.chatBalloon = None
        self.quitButton = None
        self.timeoutTaskName = self.getUniqueName() + '-timeout'
        self.timeoutTask = None
        # The quit button fires this event to close the popup.
        self.quitEvent = self.getUniqueName() + '-quit'
        self.accept(self.quitEvent, self.destroy)
        self.setPriority(MarginGlobals.MP_high)
        self.setVisible(True)
        self.update()
        self.accept('MarginVisible-update', self.update)
def destroy(self):
self.ignoreAll()
if self.timeoutTask is not None:
taskMgr.remove(self.timeoutTask)
self.timeoutTask = None
if self.chatBalloon is not None:
self.chatBalloon.removeNode()
self.chatBalloon = None
if self.quitButton is not None:
self.quitButton.destroy()
self.quitButton = None
self.textNode = None
Clickable2d.destroy(self)
def getUniqueName(self):
return 'WhisperPopup-' + str(id(self))
    def update(self):
        """Rebuild the balloon and quit button, then refresh margin state."""
        if self.chatBalloon is not None:
            self.chatBalloon.removeNode()
            self.chatBalloon = None
        if self.quitButton is not None:
            self.quitButton.destroy()
            self.quitButton = None
        self.contents.node().removeAllChildren()
        self.draw()
        if self.cell is not None:
            # We're in the margin display. Reposition our content, and update
            # the click region:
            self.reposition()
            self.updateClickRegion()
        else:
            # We aren't in the margin display. Disable the click region if one
            # is present:
            if self.region is not None:
                self.region.setActive(False)
    def draw(self):
        """Create the chat balloon and quit button under self.contents.

        Called from update(), which removes any previous balloon/button
        before invoking this.
        """
        # Pick colors for the current click state (inactive when unclickable).
        if self.isClickable():
            foreground, background = self.whisperColor[self.clickState]
        else:
            foreground, background = self.whisperColor[PGButton.SInactive]
        self.chatBalloon = ChatBalloon(
            NametagGlobals.chatBalloon2dModel,
            NametagGlobals.chatBalloon2dWidth,
            NametagGlobals.chatBalloon2dHeight, self.textNode,
            foreground=foreground, background=background
        )
        self.chatBalloon.reparentTo(self.contents)
        # Calculate the center of the TextNode:
        left, right, bottom, top = self.textNode.getFrameActual()
        center = self.contents.getRelativePoint(
            self.chatBalloon.textNodePath,
            ((left+right) / 2.0, 0, (bottom+top) / 2.0))
        # Translate the chat balloon along the inverse:
        self.chatBalloon.setPos(self.chatBalloon, -center)
        # Draw the quit button:
        self.quitButton = WhisperQuitButton(self)
        quitButtonNodePath = self.contents.attachNewNode(self.quitButton)
        # Move the quit button to the top right of the TextNode:
        quitButtonNodePath.setPos(self.contents.getRelativePoint(
            self.chatBalloon.textNodePath, (right, 0, top)))
        # Apply the quit button shift:
        quitButtonNodePath.setPos(quitButtonNodePath, self.QUIT_BUTTON_SHIFT)
        # Allow the quit button to close this whisper:
        self.quitButton.setClickEvent(self.quitEvent)
    def manage(self, marginManager):
        """Show in the margin display and schedule the auto-close timeout."""
        MarginVisible.manage(self, marginManager)
        # After self.timeout seconds, unmanage() removes and destroys us.
        self.timeoutTask = taskMgr.doMethodLater(
            self.timeout, self.unmanage, self.timeoutTaskName, [marginManager])
    def unmanage(self, marginManager):
        """Remove from the margin display and destroy this popup."""
        MarginVisible.unmanage(self, marginManager)
        self.destroy()
def setClickable(self, senderName, fromId, isPlayer=0):
self.senderName = senderName
self.fromId = fromId
self.isPlayer = isPlayer
self.setClickEvent('clickedWhisper', extraArgs=[fromId, isPlayer])
self.setActive(True)
def applyClickState(self, clickState):
if self.chatBalloon is not None:
foreground, background = self.whisperColor[clickState]
self.chatBalloon.setForeground(foreground)
self.chatBalloon.setBackground(background)
def setClickState(self, clickState):
if self.isClickable():
self.applyClickState(clickState)
else:
self.applyClickState(PGButton.SInactive)
if self.isHovering() or self.quitButton.isHovering():
self.quitButton.contents.show()
elif self.quitButton.getClickState() == PGButton.SDepressed:
self |
gholms/euca2ools | euca2ools/commands/iam/removeclientfromopenidconnectprovider.py | Python | bsd-2-clause | 1,921 | 0 | # Copyright (c) 2016 Hewlett Packard Enterprise Development LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR | IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND O | N ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT
class RemoveClientIDFromOpenIDConnectProvider(IAMRequest):
    """
    Remove a client ID from an OpenID Connect provider
    """
    # requestbuilder argument spec: a positional provider ARN plus a required
    # --client-id option; dest names map directly onto the IAM request
    # parameters. AS_ACCOUNT is the shared account argument imported from
    # euca2ools.commands.iam.
    ARGS = [Arg('OpenIDConnectProviderArn', metavar='OIDC',
                help='the ARN of the provider to update (required)'),
            Arg('-c', '--client-id', dest='ClientID', metavar='CLIENT',
                required=True,
                help='the client ID to remove from the provider (required)'),
            AS_ACCOUNT]
|
stefan2904/activismBot | activistManager/models.py | Python | mit | 477 | 0 | from django.db import models
from botManager.models import Bot
class Activist(models.Model):
    """An activist account managed by a :class:`botManager.models.Bot`."""

    # Platform-specific identifier for this activist (opaque string).
    identifier = models.CharField(max_length=200)
    name = models.CharField(max_length=200)
    username = models.CharField(max_length=200)
    # Set automatically when the row is first created.
    reg_date = models.DateTimeField('Date registered', auto_now_add=True)
    # CASCADE is the implicit default on Django < 2.0; spelling it out keeps
    # the model valid on Django >= 2.0, where on_delete is mandatory.
    bot = models.ForeignKey(Bot, on_delete=models.CASCADE)

    def __str__(self):
        return '{} ({}: {})'.format(self.name, self.bot.name, self.identifier)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.