| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
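Each row below is one flattened record from this table: the six metadata columns are shown on a single line, followed by the row's prefix, middle, and suffix text rejoined into one code listing. As a rough illustration of how such records might be consumed, here is a minimal sketch; the `<fim_*>` sentinel strings and the helper names are assumptions for illustration, not part of the dataset itself.

```python
from typing import Dict, Iterable

# Illustrative sentinel tokens; real fill-in-the-middle setups define their own.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def reassemble(row: Dict[str, str]) -> str:
    """Concatenate the three text columns back into the original file contents."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: Dict[str, str]) -> str:
    """Arrange the columns in prefix/suffix/middle order, a common FIM training layout."""
    return (FIM_PREFIX + row["prefix"]
            + FIM_SUFFIX + row["suffix"]
            + FIM_MIDDLE + row["middle"])

def iter_low_score_rows(rows: Iterable[Dict[str, str]], max_score: float = 0.34):
    """Filter records using the metadata columns from the header above."""
    for row in rows:
        if row.get("language") == "Python" and float(row.get("score", 0.0)) <= max_score:
            yield row
```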
| ncmatson/OSTE | app/views.py | Python | mit | 2,380 | 0.006723 |
from app import app, grabber, merge, segment
from flask import render_template, request, url_for, jsonify
import cv2
import numpy as np
import os, re
def rm(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
@app.route('/')
@app.route('/index')
def home():
return render_template('index.html')
@app.route('/grabber/', methods=['POST'])
def doGrabber():
# clean up folders
rm('app/static/img', 'dg*')
rm('app/ma_prediction_400','dg*')
data = request.form
lat = data['lat']
lon = data['lon']
zoom = data['zoom']
with open('app/static/secrets.txt') as f: token = f.read()
# get the location from digital globe
g = grabber.Grabber('app/static/img', token,'png')
time = g.grab(lat, lon, zoom)
# 'smart' means that the image went through the neural net prediction script
smart_contours = segment.predict(time,'app/ma_prediction_400/dg%s.png'%(time), 'app/static/img/nn_dg'+time+'.png')
smart_areas = segment.get_areas(smart_contours.values())
# 'dumb' means that the segmentation was on the original image
dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
dumb_areas = segment.get_areas(dumb_contours.values())
# uses 'smart' locations to pick out contours in the 'dumb' image
buildings = merge.intersect(smart_contours, dumb_contours)
merge.mkimage('app/static/img/dg'+time+'.png','app/static/img/merge_dg'+time+'.png', buildings)
areas = segment.get_areas(buildings.values())
url_nn = url_for('static', filename='img/nn_base_dg'+time+'.png')
url_smart = url_for('static', filename='img/nn_dg'+time+'.png')
url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
url_merge = url_for('static', filename='img/merge_dg'+time+'.png')
# # for cameron
# dumb_contours = segment.dumb_contours('app/static/img/dg'+time+'.png','app/static/img/dumy_dg'+time+'.png')
# dumb_areas = segment.get_areas(dumb_contours.values())
# areas = dumb_areas
# url_nn = ''
# url_smart = ''
# url_merge = ''
# url_dumb = url_for('static', filename='img/dumy_dg'+time+'.png')
return jsonify(url_nn=url_nn, url_smart=url_smart, url_dumb=url_dumb, url_merge=url_merge,
areas=areas
)
| natano/python-git-orm | git_orm/serializer.py | Python | isc | 1,101 | 0 |
import re
import textwrap
__all__ = ['dumps', 'loads']
SPLIT_ITEMS = re.compile(r'\n(?!\s)').split
MATCH_ITEM = re.compile(r'''
(?P<key>\w+): # key
\s?
(?P<value>.*?)$ # first line
(?P<value2>.+)? # optional continuation line(s)
''', re.MULTILINE | re.DOTALL | re.VERBOSE).match
def dumps(data, comments={}):
s = ''
for k, v in data.items():
comment = comments.get(k, None)
if comment:
s += '# ' + '\n '.join(comment.splitlines()) + '\n'
value = v or ''
s += '{}: {}\n'.format(k, value.replace('\n', '\n '))
return s
def loads(serialized):
data = {}
lineno = 0
for item in SPLIT_ITEMS(serialized):
if not item.startswith('#') and item.strip():
m = MATCH_ITEM(item)
if not m:
raise ValueError('syntax error on line {}'.format(lineno + 1))
value = m.group('value')
value += textwrap.dedent(m.group('value2') or '')
data[m.group('key')] = value or None
lineno += item.count('\n') + 1
return data
| samuelcolvin/aiohttp-devtools | aiohttp_devtools/runserver/utils.py | Python | mit | 732 | 0.001366 |
class MutableValue:
"""
Used to avoid warnings (and in future errors) from aiohttp when the app context is modified.
"""
__slots__ = 'value',
def __init__(self, value=None):
self.value = value
def change(self, new_value):
self.value = new_value
def __len__(self):
return len(self.value)
def __repr__(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def __bool__(self):
return bool(self.value)
def __eq__(self, other):
return MutableValue(self.value == other)
def __add__(self, other):
return self.value + other
def __getattr__(self, item):
return getattr(self.value, item)
| PrasannaBarate/ExpenseTracker-Django | DailyExpenses/migrations/0001_initial.py | Python | apache-2.0 | 998 | 0.003006 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-06 06:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('expense_date', models.DateField()),
('expense_detail', models.CharField(help_text='Enter expense details', max_length=200, null=True)),
('expense_amount', models.FloatField(help_text='Enter expense amount', null=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| invisiblek/python-for-android | python3-alpha/python3-src/Lib/test/test_import.py | Python | apache-2.0 | 24,643 | 0.000203 |
import builtins
import imp
from importlib.test.import_ import test_relative_imports
from importlib.test.import_ import util as importlib_util
import marshal
import os
import py_compile
import random
import stat
import sys
import unittest
import textwrap
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload)
from test import script_helper
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyo",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
def tearDown(self):
unload(TESTFN)
setUp = tearDown
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
pyo = TESTFN + ".pyo"
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
unlink(pyo)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_execute_bit_not_copied(self):
# Issue 6070: under posix .pyc files got their execute bit set if
# the .py file had the execute bit set, but they aren't executable.
with temp_umask(0o022):
sys.path.insert(0, os.curdir)
try:
fname = TESTFN + os.extsep + "py"
open(fname, 'w').close()
os.chmod(fname, (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
__import__(TESTFN)
fn = imp.cache_from_source(fname)
if not os.path.exists(fn):
self.fail("__import__ did not result in creation of "
"either a .pyc or .pyo file")
s = os.stat(fn)
self.assertEqual(
stat.S_IMODE(s.st_mode),
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
def test_imp_module(self):
# Verify that the imp module can correctly load and find .py files
# XXX (ncoghlan): It would be nice to use support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
orig_path = os.path
orig_getenv = os.getenv
with EnvironmentVarGuard():
x = imp.find_module("os")
self.addCleanup(x[0].close)
new_os = imp.load_module("os", *x)
self.assertIs(os, new_os)
self.assertIs(orig_path, new_os.path)
self.assertIsNot(orig_getenv, new_os.getenv)
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc (or .pyo).
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertTrue(x is test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertTrue(y is test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, imp.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self
| KyleKing/recipes | noxfile.py | Python | mit | 139 | 0 |
"""nox-poetry configuration file."""
from calcipy.dev.noxfile import build_check, build_dist, check_safety, coverage, tests # noqa: F401
| klahnakoski/SpotManager | vendor/mo_math/hashes.py | Python | mpl-2.0 | 593 | 0 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA256, Hash
def sha256(bytes):
digest = Hash(SHA256(), backend=default_backend())
digest.update(bytes)
return digest.finalize()
| alesaccoia/chew-broadcaster | install-utils/release/osx/release_util.py | Python | gpl-2.0 | 13,845 | 0.004478 |
from xml.etree import ElementTree as ET
def qn_tag(n, t):
return {
'ce': str(ET.QName('http://catchexception.org/xml-namespaces/ce', t)),
'sparkle': str(ET.QName('http://www.andymatuschak.org/xml-namespaces/sparkle', t))
}[n]
def create_channel(m):
if m['stable']:
return 'stable'
else:
return '{0}/{1}'.format(m['user'], m['branch'])
def create_link(rel_channel, filename):
return 'https://builds.catchexception.org/obs-studio/{0}/{1}'.format(rel_channel, filename)
def create_version(m):
if m['stable']:
return m['tag']['name']
else:
return '{0}.{1}'.format(m['tag']['name'], m['jenkins_build'])
def create_feed(rel_channel):
rss_el = ET.Element('rss')
title = 'OBS Studio {0} channel'.format(rel_channel)
link = create_link(rel_channel, "updates.xml")
description = 'OBS Studio update channel'
channel_el = ET.SubElement(rss_el, 'channel')
ET.SubElement(channel_el, 'title').text = title
ET.SubElement(channel_el, 'link').text = link
ET.SubElement(channel_el, 'description').text = description
ET.SubElement(channel_el, 'language').text = 'en'
return rss_el
def load_or_create_feed(rel_channel):
link = create_link(rel_channel, "updates.xml")
import urllib2
feed = create_feed(rel_channel)
try:
resp = urllib2.urlopen(link)
feed = ET.fromstring(resp.read())
except urllib2.HTTPError, e:
if e.code != 404:
raise
return feed
except:
raise
return feed
def load_or_create_history(rel_channel):
link = create_link(rel_channel, "history")
import urllib2, cPickle
try:
resp = urllib2.urlopen(link)
return cPickle.loads(resp.read())
except urllib2.HTTPError, e:
if e.code != 404:
raise
return dict()
def sign_package(package, key):
from shlex import split as shplit
from subprocess import PIPE
with open(package, 'r') as f:
import subprocess
p1 = subprocess.Popen(shplit('openssl dgst -sha1 -binary'), stdin=f, stdout=PIPE)
p2 = subprocess.Popen(shplit('openssl dgst -dss1 -sign "{0}"'.format(key)), stdin=p1.stdout, stdout=PIPE)
p3 = subprocess.Popen(shplit('openssl enc -base64'), stdin=p2.stdout, stdout=PIPE)
sig = ''.join(p3.communicate()[0].splitlines())
p1.poll(), p2.poll(), p3.poll()
if p1.returncode or p2.returncode or p3.returncode:
raise RuntimeError
return sig
def load_manifest(manifest_file):
with open(manifest_file, 'r') as f:
import cPickle
return cPickle.load(f)
def populate_item(item, package, key, m, channel, package_type):
from email.utils import formatdate
import os
package_path = '{0}-{1}.zip'.format(package, package_type)
signature = sign_package(package_path, key)
user_version = create_version(m)
base_url = 'https://builds.catchexception.org/obs-studio/{0}'.format(channel)
title = 'OBS Studio {0} on {1} ({2})'.format(user_version, channel, package_type)
ET.SubElement(item, 'title').text = title
ET.SubElement(item, qn_tag('sparkle', 'releaseNotesLink')).text = '{0}/notes.html'.format(base_url)
ET.SubElement(item, 'pubDate').text = formatdate()
ET.SubElement(item, qn_tag('ce', 'packageType')).text = package_type
if m['stable']:
ET.SubElement(item, qn_tag('ce', 'deployed')).text = 'false'
version = m['tag']['name']
else:
version = m['jenkins_build']
ET.SubElement(item, 'enclosure', {
'length': str(os.stat(package_path).st_size),
'type': 'application/octet-stream',
'url': '{0}/{1}-{2}.zip'.format(base_url, user_version, package_type),
qn_tag('ce', 'sha1'): m['sha1'],
qn_tag('sparkle', 'dsaSignature'): signature,
qn_tag('sparkle', 'shortVersionString'): user_version,
qn_tag('sparkle', 'version'): version
})
def mkdir(dirname):
import os, errno
try:
os.makedirs(dirname)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def write_tag_html(f, desc):
ul = False
for l in desc:
if not len(l):
continue
if l.startswith('*'):
ul = True
if not ul:
f.write('<ul>')
import re
f.write('<li>{0}</li>'.format(re.sub(r'^(\s*)?[*](\s*)?', '', l)))
else:
ul = False
if ul:
f.write('</ul>')
f.write('<p>{0}</p>'.format(l))
if ul:
f.write('</ul>')
def write_notes_html(f, manifest, versions, history):
# make newest to oldest
commits = [dict(sha1 = c[:40], desc = c[41:]) for c in manifest['commits']]
known_commits = set(c['sha1'] for c in commits)
commit_known = lambda commit: commit['sha1'] in known_commits
history[manifest['sha1']] = commits
from distutils.version import LooseVersion
last_tag = LooseVersion(manifest['tag']['name'])
versions = [v for v in versions if LooseVersion(v['user_version']) >= last_tag]
for v in versions:
v['commit_set'] = set(c['sha1'] for c in history.get(v['sha1'], []))
# oldest to newest
if versions:
v = versions[0]
v['commits'] = [dict(c) for c in history.get(v['sha1'], [])]
v['known'] = commit_known(v)
for c in v['commits']:
c['known'] = commit_known(c)
c['removed'] = False
for p, v in zip(versions, versions[1:]):
v['commits'] = list()
v['known'] = commit_known(v)
removed = p['commit_set'] - v['commit_set']
added = v['commit_set'] - p['commit_set']
for c in history.get(v['sha1'], []):
if c['sha1'] in added:
v['commits'].append(dict(c))
v['commits'][-1]['removed'] = False
for c in history.get(p['sha1'], [])[::-1]:
if c['sha1'] in removed:
v['commits'].append(dict(c))
v['commits'][-1]['removed'] = True
for c in v['commits']:
c['known'] = commit_known(c)
have_displayable_commits = False
for v in versions:
if v['commits']:
have_displayable_commits = True
break
f.write('''
<!DOCTYPE html>
<html>
<head>
<title>Release notes for version {0}</title>
<meta charset="utf-8">
<script>
var versions = ["{1}"];
function toggle(version)
{{
var changes = document.getElementById("changes" + version);
if (changes != null)
changes.style.display = changes.style.display == "none" ? "block" : "none";
var link = document.getElementById("toggle" + version);
if (link != null)
link.innerHTML = link.innerHTML == "[-]" ? "[+]" : "[-]";
return false;
}}
function toggle_lower(version)
{{
if (versions.indexOf(version) == -1)
return;
var version_found = false;
var captions = document.getElementsByTagName("h3");
for (var i = 0; i < captions.length; i++) {{
var parts = captions[i].id.split("caption");
if (!parts || parts.length != 2)
continue;
var rebased = captions[i].className.search(/rebased/) != -1;
var current_version = parts[1] == version;
if (version_found) {{
captions[i].className += " old";
toggle(parts[1]);
}}
if (current_version)
version_found = true;
}}
| sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/neutronclient/tests/unit/vpn/test_cli20_ipsecpolicy.py | Python | mit | 8,365 | 0 |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.tests.unit import test_cli20
class CLITestV20VpnIpsecPolicyJSON(test_cli20.CLITestV20Base):
def test_create_ipsecpolicy_all_params(self):
"""vpn-ipsecpolicy-create all params with dashes."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'first-ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'ah'
tenant_id = 'my-tenant'
my_id = 'my-id'
lifetime = 'units=seconds,value=20000'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--transform-protocol', transform_protocol,
'--encapsulation-mode', encapsulation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode', 'description',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode, description,
transform_protocol, pfs,
tenant_id]
extra_body = {
'lifetime': {
'units': 'seconds',
'value': 20000,
},
}
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
extra_body=extra_body)
def test_create_ipsecpolicy_with_limited_params(self):
"""vpn-ipsecpolicy-create with limited params."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-128'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'esp'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--tenant-id', tenant_id]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode,
transform_protocol, pfs,
tenant_id]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
def _test_lifetime_values(self, lifetime):
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'my-ipsec-policy'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
ike_version = 'v1'
phase1_negotiation_mode = 'main'
pfs = 'group5'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--ike-version', ike_version,
'--phase1-negotiation-mode', phase1_negotiation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'description',
'auth_algorithm', 'encryption_algorithm',
'phase1_negotiation_mode',
'ike_version', 'pfs',
'tenant_id']
position_values = [name, description,
auth_algorithm, encryption_algorithm,
phase1_negotiation_mode, ike_version, pfs,
tenant_id]
try:
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
except Exception:
return
self.fail("IPsecPolicy Lifetime Error")
def test_create_ipsecpolicy_with_invalid_lifetime_keys(self):
lifetime = 'uts=seconds,val=20000'
self._test_lifetime_values(lifetime)
def test_create_ipsecpolicy_with_invalide_lifetime_values(self):
lifetime = 'units=minutes,value=0'
self._test_lifetime_values(lifetime)
def test_list_ipsecpolicy(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ipsecpolicy_pagination(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ipsecpolicy_sort(self):
"""vpn-ipsecpolicy-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ipsecpolicy_limit(self):
"""vpn-ipsecpolicy-list -P."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_ipsecpolicy_id(self):
"""vpn-ipsecpolicy-show ipsecpolicy_id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_ipsecpolicy_id_name(self):
"""vpn-ipsecpolicy-show."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_ipsecpolicy(self):
"""vpn-ipsecpolicy-update myid --name newname --tags a b."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_delete_ipsecpolicy(self):
"""vpn-ipsecpolicy-delete my-id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.DeleteIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
my_
| tgbugs/hypush | hyputils/memex/db/mixins.py | Python | mit | 548 | 0 |
# -*- coding: utf-8 -*-
"""Reusable mixins for SQLAlchemy declarative models."""
from __future__ import unicode_literals
import datetime
import sqlalchemy as sa
class Timestamps(object):
created = sa.Column(
sa.DateTime,
default=datetime.datetime.utcnow,
server_default=sa.func.now(),
nullable=False,
)
updated = sa.Column(
sa.DateTime,
server_default=sa.func.now(),
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow,
nullable=False,
)
| andrewyoung1991/scons | test/Progress/spinner.py | Python | mit | 2,151 | 0.00093 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify output when a Progress() call is initialized with the list
that represents a canonical "spinner" on the output.
"""
import os
import TestSCons
test = TestSCons.TestSCons(universal_newlines=None)
test.write('SConstruct', r"""
env = Environment()
env['BUILDERS']['C'] = Builder(action=Copy('$TARGET', '$SOURCE'))
Progress(['-\r', '\\\r', '|\r', '/\r'])
env.C('S1.out', 'S1.in')
env.C('S2.out', 'S2.in')
env.C('S3.out', 'S3.in')
env.C('S4.out', 'S4.in')
""")
test.write('S1.in', "S1.in\n")
test.write('S2.in', "S2.in\n")
test.write('S3.in', "S3.in\n")
test.write('S4.in', "S4.in\n")
expect = """\
\\\r|\rCopy("S1.out", "S1.in")
/\r-\rCopy("S2.out", "S2.in")
\\\r|\rCopy("S3.out", "S3.in")
/\r-\rCopy("S4.out", "S4.in")
\\\r|\r"""
if os.linesep != '\n':
expect = expect.replace('\n', os.linesep)
test.run(arguments = '-Q .', stdout=expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| ettrig/NIPAP | tests/upgrade-before.py | Python | mit | 3,381 | 0.003253 |
#!/usr/bin/env python
#
# This is run by Travis-CI before an upgrade to load some data into the
# database. After the upgrade is complete, the data is verified by
# upgrade-after.py to make sure that the upgrade of the database went smoothly.
#
import logging
import unittest
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../pynipap')
sys.path.insert(0, '../nipap')
sys.path.insert(0, '../nipap-cli')
from nipap.backend import Nipap
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
from pynipap import AuthOptions, VRF, Pool, Prefix, NipapNonExistentError, NipapDuplicateError, NipapValueError
import pynipap
pynipap.xmlrpc_uri = 'http://unittest:gottatest@127.0.0.1:1337'
o = AuthOptions({
'authoritative_source': 'nipap'
})
class TestHelper:
@classmethod
def clear_database(cls):
cfg = NipapConfig('/etc/nipap/nipap.conf')
n = Nipap()
# have to delete hosts before we can delete the rest
n._execute("DELETE FROM ip_net_plan WHERE masklen(prefix) = 32")
# the rest
n._execute("DELETE FROM ip_net_plan")
# delete all except for the default VRF with id 0
n._execute("DELETE FROM ip_net_vrf WHERE id > 0")
# set default info for VRF 0
n._execute("UPDATE ip_net_vrf SET name = 'default', description = 'The default VRF, typically the Internet.' WHERE id = 0")
n._execute("DELETE FROM ip_net_pool")
n._execute("DELETE FROM ip_net_asn")
def add_prefix(self, prefix, type, description, tags=None):
if tags is None:
tags = []
p = Prefix()
p.prefix = prefix
p.type = type
p.description = description
p.tags = tags
p.save()
return p
class TestLoad(unittest.TestCase):
""" Load some data into the database
"""
def test_load_data(self):
"""
"""
th = TestHelper()
p1 = th.add_prefix('192.168.0.0/16', 'reservation', 'test')
p2 = th.add_prefix('192.168.0.0/20', 'reservation', 'test')
p3 = th.add_prefix('192.168.0.0/24', 'reservation', 'test')
p4 = th.add_prefix('192.168.1.0/24', 'reservation', 'test')
p5 = th.add_prefix('192.168.2.0/24', 'reservation', 'test')
p6 = th.add_prefix('192.168.32.0/20', 'reservation', 'test')
p7 = th.add_prefix('192.168.32.0/24', 'reservation', 'test')
p8 = th.add_prefix('192.168.32.1/32', 'reservation', 'test')
ps1 = th.add_prefix('2001:db8:1::/48', 'reservation', 'test')
ps2 = th.add_prefix('2001:db8:1::/64', 'reservation', 'test')
ps3 = th.add_prefix('2001:db8:2::/48', 'reservation', 'test')
pool1 = Pool()
pool1.name = 'upgrade-test'
pool1.ipv4_default_prefix_length = 31
pool1.ipv6_default_prefix_length = 112
pool1.save()
p2.pool = pool1
p2.save()
ps1.pool = pool1
ps1.save()
pool2 = Pool()
pool2.name = 'upgrade-test2'
pool2.save()
vrf1 = VRF()
vrf1.name = 'foo'
vrf1.rt = '123:123'
vrf1.save()
if __name__ == '__main__':
# set up logging
log = logging.getLogger()
logging.basicConfig()
log.setLevel(logging.INFO)
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
| gmr/tornado-elasticsearch | tornado_elasticsearch.py | Python | bsd-3-clause | 46,402 | 0.000388 |
"""tornado_elasticsearch extends the official elasticsearch library adding
asynchronous support for the Tornado stack.
See http://elasticsearch-py.readthedocs.org/en/latest/ for information
on how to use the API beyond the introduction for how to use with Tornado::
from tornado import gen
from tornado import web
from tornado_elasticsearch import AsyncElasticsearch
class Info(web.RequestHandler):
@web.asynchronous
@gen.engine
def get(self, *args, **kwargs):
es = AsyncElasticsearch()
info = yield es.info()
self.finish(info)
"""
from elasticsearch.connection.base import Connection
from elasticsearch import exceptions
from elasticsearch.client import Elasticsearch
from elasticsearch.transport import Transport, TransportError
from elasticsearch.client.utils import query_params, _make_path
from tornado import concurrent
from tornado import gen
from tornado import httpclient
import logging
import time
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from tornado import version
__version__ = '0.5.0'
LOGGER = logging.getLogger(__name__)
class AsyncHttpConnection(Connection):
"""Add Tornado Asynchronous support to ElasticSearch.
:param str host: The host for the connection
:param int port: The port for the connection
:param str|tuple http_auth: optional http auth information as either a
colon delimited string ``("username:password")`` or
tuple ``(username, password)``
:param int request_timeout: optional default timeout in seconds
:arg use_ssl: use ssl for the connection if ``True``
"""
_auth_user = None
_auth_password = None
_user_agent = 'tornado_elasticsearch %s/Tornado %s' % (__version__, version)
ssl_transport_schema = 'https'
def __init__(self, host='localhost', port=9200, http_auth=None,
use_ssl=False, request_timeout=None, max_clients=10, **kwargs):
super(AsyncHttpConnection, self).__init__(host=host, port=port,
**kwargs)
self._assign_auth_values(http_auth)
self.base_url = '%s://%s:%s%s' % (self.ssl_transport_schema if use_ssl
else self.transport_schema,
host, port, self.url_prefix)
httpclient.AsyncHTTPClient.configure(None, max_clients=max_clients)
self._client = httpclient.AsyncHTTPClient()
self._headers = {'Content-Type': 'application/json; charset=UTF-8'}
self._start_time = None
self.request_timeout = request_timeout
@concurrent.return_future
def perform_request(self, method, url, params=None, body=None,
timeout=None, ignore=(), callback=None):
request_uri = self._request_uri(url, params)
LOGGER.debug('%s, %r, %r', url, body, params)
kwargs = self._request_kwargs(method, body, timeout)
self._start_time = time.time()
def on_response(response):
duration = time.time() - self._start_time
raw_data = response.body.decode('utf-8') \
if response.body is not None else None
LOGGER.info('Response from %s: %s', url, response.code)
if not (200 <= response.code < 300) and \
response.code not in ignore:
LOGGER.debug('Error: %r', raw_data)
self.log_request_fail(method, request_uri, url, body, duration,
response.code)
error = exceptions.HTTP_EXCEPTIONS.get(response.code,
TransportError)
raise error(response.code, raw_data)
self.log_request_success(method, request_uri, url, body,
response.code, raw_data, duration)
callback((response.code, response.headers, raw_data))
LOGGER.debug('Fetching [%s] %s', kwargs['method'], request_uri)
LOGGER.debug('kwargs: %r', kwargs)
self._client.fetch(httpclient.HTTPRequest(request_uri, **kwargs),
callback=on_response)
def _assign_auth_values(self, http_auth):
"""Take the http_auth value and split it into the attributes that
carry the http auth username and password
:param str|tuple http_auth: The http auth value
"""
if not http_auth:
pass
elif isinstance(http_auth, (tuple, list)):
self._auth_user, self._auth_password = http_auth
elif isinstance(http_auth, str):
self._auth_user, self._auth_password = http_auth.split(':')
else:
raise ValueError('HTTP Auth Credentials should be str or '
'tuple, not %s' % type(http_auth))
def _request_kwargs(self, method, body, timeout):
if body and method == 'GET':
method = 'POST'
kwargs = {'method': method, 'user_agent': self._user_agent,
'headers': self._headers}
if self.request_timeout is not None:
kwargs['request_timeout'] = self.request_timeout
if self._auth_user and self._auth_password:
kwargs['auth_username'] = self._auth_user
kwargs['auth_password'] = self._auth_password
if body:
kwargs['body'] = body
if timeout:
kwargs['request_timeout'] = timeout
kwargs['allow_nonstandard_methods'] = True
return kwargs
def _request_uri(self, url, params):
uri = self.url_prefix + url
if params:
uri = '%s?%s' % (uri, urlencode(params or {}))
return '%s%s' % (self.base_url, uri)
class AsyncTransport(Transport):
@gen.coroutine
def perform_request(self, method, url, params=None, body=None):
"""Perform the actual request. Retrieve a connection from the
connection pool, pass all the information to it's perform_request
method and return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
If the operation was successful and the connection used was previously
marked as dead, mark it as live, resetting it's failure count.
:param method: HTTP method to use
:param url: absolute url (without host) to target
:param params: dictionary of query parameters, will be handed over to
the underlying :class:`~tornado_elasticsearch.AsyncHttpConnection`
class for serialization
:param body: body of the request, will be serialized using serializer
and passed to the connection
"""
if body is not None:
body = self.serializer.dumps(body)
# some clients or environments don't support sending GET with body
if method in ('HEAD', 'GET') and self.send_get_body_as != 'GET':
# send it as post instead
if self.send_get_body_as == 'POST':
method = 'POST'
# or as source parameter
elif self.send_get_body_as == 'source':
if params is None:
params = {}
params['source'] = body
body = None
if body is not None:
try:
body = body.encode('utf-8')
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
ignore = ()
if params and 'ignore' in params:
ignore = params.pop('ignore')
if isinstance(ignore, int):
ignore = (ignore, )
for attempt in range(self.max_retries + 1):
connection = self.get_connection()
try:
result = yield connection.perform_request(method, url,
params, body,
ignore=ignore)
(status, headers, data) = result
| rbramwell/pulp | bindings/pulp/bindings/bindings.py | Python | gpl-2.0 | 3,641 | 0.003845 |
from pulp.bindings import auth, consumer, consumer_groups, repo_groups, repository
from pulp.bindings.actions import ActionsAPI
from pulp.bindings.content import OrphanContentAPI, ContentSourceAPI, ContentCatalogAPI
from pulp.bindings.event_listeners import EventListenerAPI
from pulp.bindings.server_info import ServerInfoAPI, ServerStatusAPI
from pulp.bindings.tasks import TasksAPI, TaskSearchAPI
from pulp.bindings.upload import UploadAPI
class Bindings(object):
def __init__(self, pulp_connection):
"""
@type: pulp_connection: pulp.bindings.server.PulpConnection
"""
# Please keep the following in alphabetical order to ease reading
self.actions = ActionsAPI(pulp_connection)
self.bind = consumer.BindingsAPI(pulp_connection)
self.bindings = consumer.BindingSearchAPI(pulp_connection)
self.profile = consumer.ProfilesAPI(pulp_connection)
self.consumer = consumer.ConsumerAPI(pulp_connection)
self.consumer_content = consumer.ConsumerContentAPI(pulp_connection)
self.consumer_content_schedules = consumer.ConsumerContentSchedulesAPI(pulp_connection)
self.consumer_group = consumer_groups.ConsumerGroupAPI(pulp_connection)
self.consumer_group_search = consumer_groups.ConsumerGroupSearchAPI(pulp_connection)
self.consumer_group_actions = consumer_groups.ConsumerGroupActionAPI(pulp_connection)
self.consumer_group_bind = consumer_groups.ConsumerGroupBindAPI(pulp_connection)
self.consumer_group_content = consumer_groups.ConsumerGroupContentAPI(pulp_connection)
self.consumer_history = consumer.ConsumerHistoryAPI(pulp_connection)
self.consumer_search = consumer.ConsumerSearchAPI(pulp_connection)
self.content_orphan = OrphanContentAPI(pulp_connection)
self.content_source = ContentSourceAPI(pulp_connection)
self.content_catalog = ContentCatalogAPI(pulp_connection)
self.event_listener = EventListenerAPI(pulp_connection)
self.permission = auth.PermissionAPI(pulp_connection)
self.repo = repository.RepositoryAPI(pulp_connection)
self.repo_actions = repository.RepositoryActionsAPI(pulp_connection)
self.repo_distributor = repository.RepositoryDistributorAPI(pulp_connection)
self.repo_group = repo_groups.RepoGroupAPI(pulp_connection)
self.repo_group_actions = repo_groups.RepoGroupActionAPI(pulp_connection)
self.repo_group_distributor = repo_groups.RepoGroupDistributorAPI(pulp_connection)
self.repo_group_distributor_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_group_search = repo_groups.RepoGroupSearchAPI(pulp_connection)
self.repo_history = repository.RepositoryHistoryAPI(pulp_connection)
self.repo_importer = repository.RepositoryImporterAPI(pulp_connection)
self.repo_publish_schedules = repository.RepositoryPublishSchedulesAPI(pulp_connection)
self.repo_search = repository.RepositorySearchAPI(pulp_connection)
self.repo_sync_schedules = repository.RepositorySyncSchedulesAPI(pulp_connection)
self.repo_unit = repository.RepositoryUnitAPI(pulp_connection)
self.role = auth.RoleAPI(pulp_connection)
self.server_info = ServerInfoAPI(pulp_connection)
self.server_status = ServerStatusAPI(pulp_connection)
self.tasks = TasksAPI(pulp_connection)
self.tasks_search = TaskSearchAPI(pulp_connection)
self.uploads = UploadAPI(pulp_connection)
self.user = auth.UserAPI(pulp_connection)
self.user_search = auth.UserSearchAPI(pulp_connection)
| DemocracyClub/yournextrepresentative | ynr/apps/elections/tests/test_viewsets.py | Python | agpl-3.0 | 994 | 0 |
import mock
from django.utils import timezone
from rest_framework.test import APIRequestFactory
from elections.api.next.api_views import BallotViewSet
class TestBallotViewSet:
def test_get_queryset_last_updated_ordered_by_modified(self):
factory = APIRequestFactory()
timestamp = timezone.now().isoformat()
request = factory.get("/next/ballots/", {"last_updated": timestamp})
request.query_params = request.GET
view = BallotViewSet(request=request)
view.queryset = mock.MagicMock()
view.get_queryset()
view.queryset.with_last_updated.assert_called_once()
def test_get_queryset_last_updated_not_ordered(self):
factory = APIRequestFactory()
request = factory.get("/next/ballots/")
request.query_params = request.GET
view = BallotViewSet(request=request)
view.queryset = mock.MagicMock()
view.get_queryset()
view.queryset.with_last_updated.assert_not_called()
| twatteyne/dustlink_academy | views/web/dustWeb/WebPageDyn.py | Python | bsd-3-clause | 2,973 | 0.019509 |
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('WebPage')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import os
import web
from viz import Viz
import WebPage
import WebHandler
class WebHandlerDyn(WebHandler.WebHandler):
def getPage(self,subResource,username):
return self.getPageDyn(dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def getData(self,subResource,username):
return self.getDataDyn(dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def postData(self,receivedData,subResource,username):
return self.postDataDyn(receivedData=receivedData,
dynPath=self.getDynPath(),
subResource=subResource,
username=username)
def getDynPath(self):
elems = WebPage.WebPage.urlStringTolist(web.ctx.path)
for e in elems:
if e.startswith('_'):
return e[1:]
class WebPageDyn(WebPage.WebPage):
def __init__(self,subPageLister=None,
subPageHandler=None,
**fvars):
assert callable(subPageLister)
# store params
self.subPageLister = subPageLister
self.subPageHandler = subPageHandler
# initialize parent class
WebPage.WebPage.__init__(self,**fvars)
# register subPageHandler
self.registerPage(WebPage.WebPage(webServer = self.webServer,
url = '_[.%%\w-]*',
title = '',
webHandler = self.subPageHandler))
def getUrlHierarchy(self,parentPath=[]):
# run the parent class' function
returnVal = WebPage.WebPage.getUrlHierarchy(self,parentPath)
# modify the children
returnVal['children'] = []
for sub in self.subPageLister():
classUrl = parentPath+[self.url]+[sub['url']]
if len(classUrl) and not classUrl[0]:
classUrl = classUrl[1:]
returnVal['children'] += [
{
'url': self.urlListToString(parentPath+[self.url]+['_'+sub['url']]),
'title': sub['title'],
'class': self.webServer.getDocumentation().getClass(classUrl),
'children': [],
}
]
return returnVal
| Horrendus/radiocontrol | api/api/admin.py | Python | agpl-3.0 | 820 | 0 |
# REST API Backend for the Radiocontrol Project
#
# Copyright (C) 2017 Stefan Derkits <stefan@derkits.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
# Register your models here.
| scality/manila | manila_tempest_tests/tests/api/admin/test_share_types_negative.py | Python | apache-2.0 | 4,282 | 0 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test # noqa
from tempest_lib.common.utils import data_utils # noqa
from tempest_lib import exceptions as lib_exc # noqa
from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base
class ShareTypesAdminNegativeTest(base.BaseSharesAdminTest):
def _create_share_type(self):
name = data_utils.rand_name("unique_st_name")
extra_specs = self.add_required_extra_specs_to_dict({"key": "value"})
return self.create_share_type(name, extra_specs=extra_specs)
@classmethod
def resource_setup(cls):
super(ShareTypesAdminNegativeTest, cls).resource_setup()
cls.member_shares_client = clients.Manager().shares_client
@test.attr(type=["gate", "smoke", ])
def test_create_share_with_nonexistent_share_type(self):
self.assertRaises(lib_exc.NotFound,
self.create_share,
share_type_id=data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_empty_name(self):
self.assertRaises(lib_exc.BadRequest, self.create_share_type, '')
@test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_too_big_name(self):
self.assertRaises(lib_exc.BadRequest,
self.create_share_type,
"x" * 256)
@test.attr(type=["gate", "smoke", ])
def test_get_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.get_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_delete_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.delete_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_create_duplicate_of_share_type(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.create_share_type,
st["share_type"]["name"],
extra_specs=self.add_required_extra_specs_to_dict())
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.add_access_to_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.remove_access_from_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.add_access_to_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.remove_access_from_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)
| ESOedX/edx-platform | lms/djangoapps/grades/tests/test_course_data.py | Python | agpl-3.0 | 4,628 | 0.003025 |
"""
Tests for CourseData utility class.
"""
from __future__ import absolute_import
import six
from mock import patch
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..course_data import CourseData
class CourseDataTest(ModuleStoreTestCase):
"""
Simple tests to ensure CourseData works as advertised.
"""
def setUp(self):
super(CourseDataTest, self).setUp()
with self.store.default_store(ModuleStoreEnum.Type.split):
self.course = CourseFactory.create()
# need to re-retrieve the course since the version on the original course isn't accurate.
self.course = self.store.get_course(self.course.id)
self.user = UserFactory.create()
self.collected_structure = get_course_in_cache(self.course.id)
self.one_true_structure = get_course_blocks(
self.user, self.course.location, collected_block_structure=self.collected_structure,
)
self.expected_results = {
'course': self.course,
'collected_block_structure': self.collected_structure,
'structure': self.one_true_structure,
'course_key': self.course.id,
'location': self.course.location,
}
@patch('lms.djangoapps.grades.course_data.get_course_blocks')
def test_fill_course_data(self, mock_get_blocks):
"""
Tests to ensure that course data is fully filled with just a single input.
"""
mock_get_blocks.return_value = self.one_true_structure
for kwarg in self.expected_results: # We iterate instead of ddt due to dependence on 'self'
if kwarg == 'location':
continue # This property is purely output; it's never able to be used as input
kwargs = {kwarg: self.expected_results[kwarg]}
course_data = CourseData(self.user, **kwargs)
for arg in self.expected_results:
# No point validating the data we used as input, and c_b_s is input-only
if arg != kwarg and arg != "collected_block_structure":
expected = self.expected_results[arg]
actual = getattr(course_data, arg)
self.assertEqual(expected, actual)
def test_properties(self):
expected_edited_on = getattr(
self.one_true_structure[self.one_true_structure.root_block_usage_key],
'subtree_edited_on',
)
for kwargs in [
dict(course=self.course),
dict(collected_block_structure=self.one_true_structure),
dict(structure=self.one_true_structure),
dict(course_key=self.course.id),
]:
course_data = CourseData(self.user, **kwargs)
self.assertEquals(course_data.course_key, self.course.id)
self.assertEquals(course_data.location, self.course.location)
self.assertEquals(course_data.structure.root_block_usage_key, self.one_true_structure.root_block_usage_key)
self.assertEquals(course_data.course.id, self.course.id)
self.assertEquals(course_data.version, self.course.course_version)
self.assertEquals(course_data.edited_on, expected_edited_on)
self.assertIn(u'Course: course_key', six.text_type(course_data))
self.assertIn(u'Course: course_key', course_data.full_string())
def test_no_data(self):
with self.assertRaises(ValueError):
_ = CourseData(self.user)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_full_string(self):
empty_structure = get_course_blocks(self.user, self.course.location)
self.assertFalse(empty_structure)
# full_string retrieves value from collected_structure when structure is empty.
course_data = CourseData(
self.user, structure=empty_structure, collected_block_structure=self.collected_structure,
)
self.assertIn(u'Course: course_key: {}, version:'.format(self.course.id), course_data.full_string())
# full_string returns minimal value when structures aren't readily available.
course_data = CourseData(self.user, course_key=self.course.id)
self.assertIn(u'empty course structure', course_data.full_string())
| tiramiseb/abandoned_calaos-web-installer | calaosapi.py | Python | agpl-3.0 | 1,520 | 0.001974 |
# Copyright 2014 Sebastien Maccagnoni-Munch
#
# This file is part of Calaos Web Installer.
#
# Calaos Web Installer is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Calaos Web Installer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Calaos Web Installer. If not, see <http://www.gnu.org/licenses/>.
import os.path
import pickle
class CalaosApi:
def __init__(self, io, rules):
self.io_path = io
self.rules_path = rules
self.readfiles()
def readfiles(self):
if os.path.exists(self.io_path):
self.io = pickle.load(file(self.io_path))
else:
self.io = []
if os.path.exists(self.rules_path):
self.rules = pickle.load(file(self.rules_path))
else:
self.rules = []
def get_config(self):
return {
'io': self.io,
'rules': self.rules
}
def writefiles(self):
pickle.dump(self.io, file(self.io_path, 'w'))
pickle.dump(self.rules, file(self.rules_path, 'w'))
| yanikou19/pymatgen | fabfile.py | Python | mit | 4,544 | 0.001761 |
"""
Deployment file to facilitate releases of pymatgen.
Note that this file is meant to be run from the root directory of the pymatgen
repo.
"""
__author__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Sep 1, 2014"
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
from fabric.api import local, lcd
from pymatgen import __version__ as ver
def make_doc():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-{3,}", contents)
n = len(toks[0].split()[-1])
changes = [toks[0]]
changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
changes = ("-" * n).join(changes)
with open("docs/latest_changes.rst", "w") as f:
f.write(changes)
with lcd("examples"):
local("ipython nbconvert --to html *.ipynb")
local("mv *.html ../docs/_static")
with lcd("docs"):
local("cp ../CHANGES.rst change_log.rst")
local("sphinx-apidoc -d 6 -o . -f ../pymatgen")
local("rm pymatgen.*.tests.rst")
for f in glob.glob("docs/*.rst"):
if f.startswith('docs/pymatgen') and f.endswith('rst'):
newoutput = []
suboutput = []
subpackage = False
with open(f, 'r') as fid:
for line in fid:
clean = line.strip()
if clean == "Subpackages":
subpackage = True
if not subpackage and not clean.endswith("tests"):
newoutput.append(line)
else:
if not clean.endswith("tests"):
suboutput.append(line)
if clean.startswith("pymatgen") and not clean.endswith("tests"):
newoutput.extend(suboutput)
subpackage = False
suboutput = []
with open(f, 'w') as fid:
fid.write("".join(newoutput))
local("make html")
local("cp _static/* _build/html/_static")
# This makes sure pymatgen.org works to redirect to the Github page
local("echo \"pymatgen.org\" > _build/html/CNAME")
# Avoid the use of jekyll so that _dir works as intended.
local("touch _build/html/.nojekyll")
def publish():
local("python setup.py release")
def setver():
local("sed s/version=.*,/version=\\\"{}\\\",/ setup.py > newsetup"
.format(ver))
local("mv newsetup setup.py")
def update_doc():
make_doc()
with lcd("docs/_build/html/"):
local("git add .")
local("git commit -a -m \"Update dev docs\"")
local("git push origin gh-pages")
def merge_stable():
local("git commit -a -m \"v%s release\"" % ver)
local("git push")
local("git checkout stable")
local("git pull")
local("git merge master")
local("git push")
local("git checkout master")
def release_github():
with open("CHANGES.rst") as f:
contents = f.read()
toks = re.split("\-+", contents)
desc = toks[1].strip()
payload = {
"tag_name": "v" + ver,
"target_commitish": "master",
"name": "v" + ver,
"body": desc,
"draft": False,
"prerelease": False
}
response = requests.post(
"https://api.github.com/repos/materialsproject/pymatgen/releases",
data=json.dumps(payload),
headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]})
print response.text
def update_changelog():
output = subprocess.check_output(["git", "log", "--pretty=format:%s",
"v%s..HEAD" % ver])
lines = ["* " + l for l in output.strip().split("\n")]
with open("CHANGES.rst") as f:
contents = f.read()
toks = contents.split("==========")
toks.insert(-1, "\n\n" + "\n".join(lines))
with open("CHANGES.rst", "w") as f:
f.write("==========".join(toks))
def log_ver():
filepath = os.path.join(os.environ["HOME"], "Dropbox", "Public",
"pymatgen", ver)
with open(filepath, "w") as f:
f.write("Release")
def release(skip_test=False):
setver()
if not skip_test:
local("nosetests")
publish()
log_ver()
update_doc()
merge_stable()
release_github()
def open_doc():
pth = os.path.abspath("docs/_build/html/index.html")
webbrowser.open("file://" + pth)
| M157q/django-localflavor | tests/test_is.py | Python | bsd-3-clause | 9,213 | 0.000543 |
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.is_.forms import (ISIdNumberField, ISPhoneNumberField,
ISPostalCodeSelect)
class ISLocalFlavorTests(SimpleTestCase):
def test_ISPostalCodeSelect(self):
f = ISPostalCodeSelect()
out = '''<select name="foo">
<option value="101">101 Reykjav\xedk</option>
<option value="103">103 Reykjav\xedk</option>
<option value="104">104 Reykjav\xedk</option>
<option value="105">105 Reykjav\xedk</option>
<option value="107">107 Reykjav\xedk</option>
<option value="108">108 Reykjav\xedk</option>
<option value="109">109 Reykjav\xedk</option>
<option value="110">110 Reykjav\xedk</option>
<option value="111">111 Reykjav\xedk</option>
<option value="112">112 Reykjav\xedk</option>
<option value="113">113 Reykjav\xedk</option>
<option value="116">116 Kjalarnes</option>
<option value="121">121 Reykjav\xedk</option>
<option value="123">123 Reykjav\xedk</option>
<option value="124">124 Reykjav\xedk</option>
<option value="125">125 Reykjav\xedk</option>
<option value="127">127 Reykjav\xedk</option>
<option value="128">128 Reykjav\xedk</option>
<option value="129">129 Reykjav\xedk</option>
<option value="130">130 Reykjav\xedk</option>
<option value="132">132 Reykjav\xedk</option>
<option value="150">150 Reykjav\xedk</option>
<option value="155">155 Reykjav\xedk</option>
<option value="170">170 Seltjarnarnes</option>
<option value="172">172 Seltjarnarnes</option>
<option value="190">190 Vogar</option>
<option value="200">200 K\xf3pavogur</option>
<option value="201">201 K\xf3pavogur</option>
<option value="202">202 K\xf3pavogur</option>
<option value="203">203 K\xf3pavogur</option>
<option value="210">210 Gar\xf0ab\xe6r</option>
<option value="212">212 Gar\xf0ab\xe6r</option>
<option value="220">220 Hafnarfj\xf6r\xf0ur</option>
<option value="221">221 Hafnarfj\xf6r\xf0ur</option>
<option value="222">222 Hafnarfj\xf6r\xf0ur</option>
<option value="225">225 \xc1lftanes</option>
<option value="230">230 Reykjanesb\xe6r</option>
<option value="232">232 Reykjanesb\xe6r</option>
<option value="233">233 Reykjanesb\xe6r</option>
<option value="235">235 Keflav\xedkurflugv\xf6llur</option>
<option value="240">240 Grindav\xedk</option>
<option value="245">245 Sandger\xf0i</option>
<option value="250">250 Gar\xf0ur</option>
<option value="260">260 Reykjanesb\xe6r</option>
<option value="270">270 Mosfellsb\xe6r</option>
<option value="271">271 Mosfellsb\xe6r</option>
<option value="276">276 Mosfellsb\xe6r</option>
<option value="300">300 Akranes</option>
<option value="301">301 Akranes</option>
<option value="302">302 Akranes</option>
<option value="310">310 Borgarnes</option>
<option value="311">311 Borgarnes</option>
<option value="320">320 Reykholt \xed Borgarfir\xf0i</option>
<option value="340">340 Stykkish\xf3lmur</option>
<option value="345">345 Flatey \xe1 Brei\xf0afir\xf0i</option>
<option value="350">350 Grundarfj\xf6r\xf0ur</option>
<option value="355">355 \xd3lafsv\xedk</option>
<option value="356">356 Sn\xe6fellsb\xe6r</option>
<option value="360">360 Hellissandur</option>
<option value="370">370 B\xfa\xf0ardalur</option>
<option value="371">371 B\xfa\xf0ardalur</option>
<option value="380">380 Reykh\xf3lahreppur</option>
<option value="400">400 \xcdsafj\xf6r\xf0ur</option>
<option value="401">401 \xcdsafj\xf6r\xf0ur</option>
<option value="410">410 Hn\xedfsdalur</option>
<option value="4
|
15">415 Bolungarv\xedk</option>
<option value="420">420 S\xfa\xf0av\xedk</option>
<option value="425">425 Flateyri</option>
<option value="430">430 Su\xf0ureyri</option>
<option value="450">450 Patreksfj\xf6r\xf0ur</option>
<option value="451">451 Patreksfj\xf6r\xf0ur</option>
<option value="460">460 T\xe1lknafj\xf6r\xf0ur</option>
<option value="465">465 B\xedl
|
dudalur</option>
<option value="470">470 \xdeingeyri</option>
<option value="471">471 \xdeingeyri</option>
<option value="500">500 Sta\xf0ur</option>
<option value="510">510 H\xf3lmav\xedk</option>
<option value="512">512 H\xf3lmav\xedk</option>
<option value="520">520 Drangsnes</option>
<option value="522">522 Kj\xf6rvogur</option>
<option value="523">523 B\xe6r</option>
<option value="524">524 Nor\xf0urfj\xf6r\xf0ur</option>
<option value="530">530 Hvammstangi</option>
<option value="531">531 Hvammstangi</option>
<option value="540">540 Bl\xf6ndu\xf3s</option>
<option value="541">541 Bl\xf6ndu\xf3s</option>
<option value="545">545 Skagastr\xf6nd</option>
<option value="550">550 Sau\xf0\xe1rkr\xf3kur</option>
<option value="551">551 Sau\xf0\xe1rkr\xf3kur</option>
<option value="560">560 Varmahl\xed\xf0</option>
<option value="565">565 Hofs\xf3s</option>
<option value="566">566 Hofs\xf3s</option>
<option value="570">570 Flj\xf3t</option>
<option value="580">580 Siglufj\xf6r\xf0ur</option>
<option value="600">600 Akureyri</option>
<option value="601">601 Akureyri</option>
<option value="602">602 Akureyri</option>
<option value="603">603 Akureyri</option>
<option value="610">610 Greniv\xedk</option>
<option value="611">611 Gr\xedmsey</option>
<option value="620">620 Dalv\xedk</option>
<option value="621">621 Dalv\xedk</option>
<option value="625">625 \xd3lafsfj\xf6r\xf0ur</option>
<option value="630">630 Hr\xedsey</option>
<option value="640">640 H\xfasav\xedk</option>
<option value="641">641 H\xfasav\xedk</option>
<option value="645">645 Fossh\xf3ll</option>
<option value="650">650 Laugar</option>
<option value="660">660 M\xfdvatn</option>
<option value="670">670 K\xf3pasker</option>
<option value="671">671 K\xf3pasker</option>
<option value="675">675 Raufarh\xf6fn</option>
<option value="680">680 \xde\xf3rsh\xf6fn</option>
<option value="681">681 \xde\xf3rsh\xf6fn</option>
<option value="685">685 Bakkafj\xf6r\xf0ur</option>
<option value="690">690 Vopnafj\xf6r\xf0ur</option>
<option value="700">700 Egilssta\xf0ir</option>
<option value="701">701 Egilssta\xf0ir</option>
<option value="710">710 Sey\xf0isfj\xf6r\xf0ur</option>
<option value="715">715 Mj\xf3ifj\xf6r\xf0ur</option>
<option value="720">720 Borgarfj\xf6r\xf0ur eystri</option>
<option value="730">730 Rey\xf0arfj\xf6r\xf0ur</option>
<option value="735">735 Eskifj\xf6r\xf0ur</option>
<option value="740">740 Neskaupsta\xf0ur</option>
<option value="750">750 F\xe1skr\xfa\xf0sfj\xf6r\xf0ur</option>
<option value="755">755 St\xf6\xf0varfj\xf6r\xf0ur</option>
<option value="760">760 Brei\xf0dalsv\xedk</option>
<option value="765">765 Dj\xfapivogur</option>
<option value="780">780 H\xf6fn \xed Hornafir\xf0i</option>
<option value="781">781 H\xf6fn \xed Hornafir\xf0i</option>
<option value="785">785 \xd6r\xe6fi</option>
<option value="800">800 Selfoss</option>
<option value="801">801 Selfoss</option>
<option value="802">802 Selfoss</option>
<option value="810">810 Hverager\xf0i</option>
<option value="815">815 \xdeorl\xe1ksh\xf6fn</option>
<option value="816">816 \xd6lfus</option>
<option value="820">820 Eyrarbakki</option>
<option value="825">825 Stokkseyri</option>
<option value="840">840 Laugarvatn</option>
<option value="845">845 Fl\xfa\xf0ir</option>
<option value="850">850 Hella</option>
<option value="851">851 Hella</option>
<option value="860">860 Hvolsv\xf6llur</option>
<option value="861">861 Hvolsv\xf6llur</option>
<option value="870">870 V\xedk</option>
<option value="871">871 V\xedk</option>
<option value="880">880 Kirkjub\xe6jarklaustur</option>
<option value="900">900 Vestmannaeyjar</option>
<option value="902">902 Vestmannaeyjar</option>
</select>'''
self.assertHTMLEqual(f.render('foo', 'bar'), out)
def test_ISIdNumberField(self):
error_atleast = ['Ensure this value has at least 10 characters (it has 9).']
error_invalid = ['Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.']
error_atmost = ['Ensure this value has at most 11 characters (it has 12).']
error_notvalid = ['The Icelandic identification number is not valid.']
valid = {
'2308803449': '230880-3449',
'230880-3449': '230880-3449',
'230880 3449': '230880-3449',
'2308803440': '230880-3440',
}
invalid = {
|
Michael-F-Bryan/spider_board
|
spider_board/gui.py
|
Python
|
mit
| 5,607
| 0.003389
|
import tkinter as tk
from tkinter.filedialog import askdirectory
from tkinter.messagebox import showwarning, showerror, showinfo
from tkinter import ttk
import logging
import sys
from threading import Thread
from spider_board.client import Browser
from spider_board.utils import time_job, LOG_FILE, get_logger, humansize
# Create the logging handlers and attach them
logger = get_logger(__name__, LOG_FILE)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
class Gui:
def __init__(self):
logger.info('Instantiating GUI')
self.root = tk.Tk()
self.browser = None
self.make_gui()
def make_gui(self):
logger.info('Building GUI')
self.main_frame = ttk.Frame(self.root)
self.main_frame.pack(expand=True, fill=tk.BOTH, pady=10, padx=10)
# Make the username label and box
        ttk.Label(self.main_frame, text='Username:').grid(row=0, column=2)
self.username = tk.StringVar()
self.username_box = ttk.Entry(self.main_frame,
textvariable=self.username)
self.username_box.grid(row=0, column=3, sticky='nsew')
# Make the password label and box
ttk.Label(self.main_frame, text='Password:').grid(row=1, column=2)
self.password = tk.StringVar()
        self.password_box = ttk.Entry(self.main_frame,
textvariable=self.password)
self.password_box.grid(row=1, column=3, sticky='nsew')
# Make the savefile label and box
self.savefile_btn = ttk.Button(self.main_frame, text='Browse',
command=self.ask_find_directory)
self.savefile_btn.grid(row=2, column=2)
self.savefile = tk.StringVar()
self.savefile_box = ttk.Entry(self.main_frame,
textvariable=self.savefile)
self.savefile_box.grid(row=2, column=3, sticky='nsew')
# Set up the column weightings
self.main_frame.columnconfigure(3, weight=1)
self.main_frame.columnconfigure(0, weight=5)
self.main_frame.rowconfigure(3, weight=1)
# Make the listbox (and scrollbar) for selecting units
self.unit_box = tk.Listbox(self.main_frame, relief=tk.SUNKEN,
selectmode=tk.EXTENDED)
self.unit_box.grid(row=0, column=0,
rowspan=5, columnspan=2,
sticky='nsew')
scrollbar = tk.Scrollbar(self.main_frame)
scrollbar.config(command=self.unit_box.yview)
self.unit_box.config(yscrollcommand=scrollbar.set)
scrollbar.grid(row=0, column=1, rowspan=5, sticky='nsew')
# Make the "login" button
self.go_button = ttk.Button(self.main_frame, text='Login',
command=self.login)
self.go_button.grid(row=4, column=2, sticky='es')
# Make the "start downloading" button
self.go_button = ttk.Button(self.main_frame, text='Start Downloading',
command=self.start_downloading)
self.go_button.grid(row=4, column=3, sticky='es')
def login(self):
logger.info('Login button pressed')
username = self.username.get()
password = self.password.get()
savefile = self.savefile.get()
# Check all required fields are filled in
if username and password and savefile:
logger.info('Attempting login')
self.browser = Browser(username, password, savefile)
self.bootstrap_browser(self.browser)
# Do the login in a different thread
Thread(target=self.browser.login).start()
else:
showwarning('Ok', 'Please fill in all necessary fields.')
logger.warn("Required fields haven't been filled in")
def start_downloading(self):
logger.info('Download button pressed')
if self.browser and self.browser.is_logged_in:
self.browser.spider_concurrent()
self.browser.download_concurrent()
else:
logger.info('Not logged in')
showerror('Ok', 'Not logged in')
def ask_find_directory(self):
save_location = askdirectory()
self.savefile.set(save_location)
def mainloop(self):
self.root.mainloop()
def quit(self):
self.root.destroy()
def update_units(self):
self.unit_box.delete(0, tk.END)
for unit in self.browser.units:
self.unit_box.insert(tk.END, unit.title)
self.root.after(1000, self.update_units)
def bootstrap_browser(self, browser):
"""
Add in any hooks to the browser so they will be run on certain events.
"""
def on_quit(browser_instance, gui):
"""Close the GUI"""
gui.quit()
def on_login_successful(browser_instance, gui):
"""Fire off an info dialog and get units (in another thread)"""
# Thread(target=browser_instance.get_units).start()
gui.root.after(0, showinfo, 'Ok', 'Login Successful')
def on_login_failed(browser_instance, gui):
"""Fire off an error dialog"""
showerror('Ok', 'Login Unsuccessful')
def on_get_units(browser_instance, gui):
gui.root.after(0, gui.update_units)
hooks = [on_quit, on_login_successful, on_login_failed,
on_get_units]
# Do the actual bootstrapping
for hook in hooks:
            callback = lambda browser_instance, hook=hook: hook(browser_instance, self)
setattr(browser, hook.__name__, callback)
browser.on_login_failed(self)
|
deklungel/iRulez
|
old/modules/discovery/discovery.py
|
Python
|
mit
| 1,800
| 0.012222
|
#!/usr/bin/env python
import sys
sys.path.append('/var/www/html/modules/libraries')
import avahi
import dbus
from time import sleep
import mysql.connector
file = open('/var/www/html/config.php', 'r')
for line in file:
if "db_name" in line: MySQL_database = line.split('"')[3]
elif "db_user" in line: MySQL_username = line.split('"')[3]
elif "db_password" in line: MySQL_password = line.split('"')[3]
cnx = mysql.connector.connect(user=MySQL_username,password=MySQL_password,database=MySQL_database)
cursor = cnx.cursor()
query = ("SELECT Setting,value FROM Settings")
cursor.execute(query)
for (Setting, value) in cursor:
if Setting == "MQTT_ip_address":
MQTT_ip_address = value
cursor.close()
cnx.close()
class ServiceAnnouncer:
def __init__(self, name, service, port, txt):
bus = dbus.SystemBus()
server = dbus.Interface(bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
        group = dbus.Interface(bus.get_object(avahi.DBUS_NAME, server.EntryGroupNew()), avahi.DBUS_INTERFACE_ENTRY_GROUP)
self._service_name = name
index = 1
while True:
try:
group.AddService(avahi.IF_UNSPEC, avahi.PROTO_INET, 0, self._service_name, service, '', '', port, avahi.string_array_to_txt_array(txt))
except dbus.DBusException: # name collision -> rename
index += 1
self._service_name = '%s #%s' % (name, str(index))
else:
break
group.Commit()
def get_service_name(self):
return self._service_name
if __name__ == '__main__':
announcer = ServiceAnnouncer(MQTT_ip_address, '_irulez._tcp.', 80,'')
print announcer.get_service_name()
sleep(10000)
|
eayunstack/python-neutronclient
|
neutronclient/tests/functional/core/test_readonly_neutron.py
|
Python
|
apache-2.0
| 6,814
| 0.000294
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from tempest.lib import exceptions
from neutronclient.tests.functional import base
class SimpleReadOnlyNeutronClientTest(base.ClientTestBase):
"""This is a first pass at a simple read only python-neutronclient test.
This only exercises client commands that are read only.
This should test commands:
* as a regular user
    * as an admin user
* with and without optional parameters
* initially just check return codes, and later test command outputs
"""
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-neutron-exist')
# NOTE(mestery): Commands in order listed in 'neutron help'
# Optional arguments:
def test_neutron_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
def test_neutron_net_list(self):
net_list = self.parser.listing(self.neutron('net-list'))
self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
def test_neutron_ext_list(self):
ext = self.parser.listing(self.neutron('ext-list'))
self.assertTableStruct(ext, ['alias', 'name'])
def test_neutron_dhcp_agent_list_hosting_net(self):
self.neutron('dhcp-agent-list-hosting-net',
params='private')
def test_neutron_agent_list(self):
agents = self.parser.listing(self.neutron('agent-list'))
field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
self.assertTableStruct(agents, field_names)
def test_neutron_floatingip_list(self):
self.neutron('floatingip-list')
def test_neutron_meter_label_list(self):
self.neutron('meter-label-list')
def test_neutron_meter_label_rule_list(self):
self.neutron('meter-label-rule-list')
def _test_neutron_lbaas_command(self, command):
try:
self.neutron(command)
except exceptions.CommandFailed as e:
if '404 Not Found' not in e.stderr:
self.fail('%s: Unexpected failure.' % command)
def test_neutron_lb_healthmonitor_list(self):
self._test_neutron_lbaas_command('lb-healthmonitor-list')
def test_neutron_lb_member_list(self):
self._test_neutron_lbaas_command('lb-member-list')
def test_neutron_lb_pool_list(self):
self._test_neutron_lbaas_command('lb-pool-list')
def test_neutron_lb_vip_list(self):
self._test_neutron_lbaas_command('lb-vip-list')
def test_neutron_net_external_list(self):
net_ext_list = self.parser.listing(self.neutron('net-external-list'))
self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
def test_neutron_port_list(self):
port_list = self.parser.listing(self.neutron('port-list'))
self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
'fixed_ips'])
def test_neutron_quota_list(self):
self.neutron('quota-list')
def test_neutron_router_list(self):
router_list = self.parser.listing(self.neutron('router-list'))
self.assertTableStruct(router_list, ['id', 'name',
'external_gateway_info'])
def test_neutron_security_group_list(self):
security_grp = self.parser.listing(self.neutron('security-group-list'))
self.assertTableStruct(security_grp, ['id', 'name',
'security_group_rules'])
def test_neutron_security_group_rule_list(self):
security_grp = self.parser.listing(self.neutron
('security-group-rule-list'))
self.assertTableStruct(security_grp, ['id', 'security_group',
'direction', 'ethertype',
'port/protocol', 'remote'])
def test_neutron_subnet_list(self):
subnet_list = self.parser.listing(self.neutron('subnet-list'))
self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
'allocation_pools'])
def test_neutron_firewall_list(self):
firewall_list = self.parser.listing(self.neutron
('firewall-list'))
self.assertTableStruct(firewall_list, ['id', 'name',
'firewall_policy_id'])
def test_neutron_firewall_policy_list(self):
firewall_policy = self.parser.listing(self.neutron
('firewall-policy-list'))
        self.assertTableStruct(firewall_policy, ['id', 'name',
                                                 'firewall_rules'])
def test_neutron_firewall_rule_list(self):
firewall_rule = self.parser.listing(self.neutron
('firewall-rule-list'))
self.assertTableStruct(firewall_rule, ['id', 'name',
'firewall_policy_id',
'summary', 'enabled'])
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
command_pattern = re.compile('^ {2}([a-z0-9\-\_]+)')
for line in lines[cmds_start:]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
'router-show', 'agent-update', 'help'))
self.assertFalse(wanted_commands - commands)
# Optional arguments:
def test_neutron_version(self):
self.neutron('', flags='--version')
def test_neutron_debug_net_list(self):
self.neutron('net-list', flags='--debug')
def test_neutron_quiet_net_list(self):
self.neutron('net-list', flags='--quiet')
|
Tomcuzz/OctaHomeAutomation
|
OctaHomeTempControl/OctaFiles/urls.py
|
Python
|
mit
| 1,656
| 0.019324
|
from OctaHomeCore.OctaFiles.urls.base import *
from OctaHomeTempControl.views import *
class TempControlOctaUrls(OctaUrls):
@classmethod
def getUrls(cls):
return [
url(r'^TempControl/command/(?P<command>\w+)/$', handleTempCommand.as_view(), name='TempControlCommandWithOutDevice'),
            url(r'^TempControl/command/(?P<command>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/command/(?P<command>\w+)/(?P<house>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/command/(?P<command>\w+)/(?P<house>\w+)/(?P<room>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlCommand'),
url(r'^TempControl/page/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<page>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempCommand.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<house>\w+)/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/page/(?P<house>\w+)/(?P<room>\w+)/(?P<page>\w+)/$', handleTempView.as_view(), name='TempControlPage'),
url(r'^TempControl/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/(?P<room>\w+)/$', handleTempView.as_view(), name='TempControl'),
url(r'^TempControl/(?P<house>\w+)/(?P<room>\w+)/(?P<deviceType>\w+)/(?P<deviceId>\d+)/$', handleTempView.as_view(), name='TempControl'),
]
|
libuparayil/networking-huawei
|
networking_huawei/tests/unit/drivers/ac/client/test_restclient.py
|
Python
|
apache-2.0
| 11,844
| 0
|
# Copyright (c) 2016 Huawei Technologies India Pvt Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslotest import base
import networking_huawei.drivers.ac.client.restclient as ac_rest
from networking_huawei.drivers.ac.common import config # noqa
test_create_network_req = {'network':
{'routerExternal': False,
'networkType': 'local',
'segmentationId': None,
'adminStateUp': True,
'tenant_id': 'test-tenant',
'name': 'net1',
'physicalNetwork': None,
'serviceName': 'physnet1',
'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
'status': 'ACTIVE',
'shared': False}}
class HuaweiACRestClientTestCase(base.BaseTestCase):
def setUp(self):
cfg.CONF.set_override('username', 'huawei_user', 'huawei_ac_config')
cfg.CONF.set_override('password', 'huawei_pwd', 'huawei_ac_config')
cfg.CONF.set_override('neutron_ip', '127.0.0.1', 'huawei_ac_config')
cfg.CONF.set_override('neutron_name', 'NS_1', 'huawei_ac_config')
super(HuaweiACRestClientTestCase, self).setUp()
self.restc = ac_rest.RestClient
self.host = cfg.CONF.huawei_ac_config.host
self.port = cfg.CONF.huawei_ac_config.port
self.url = '%s%s%s%s' % ("http://", self.host, ":", str(self.port))
def _mock_req_resp(self, status_code):
response = mock.Mock()
response.response = "OK"
response.status_code = status_code
response.errorcode = 0
response.content = jsonutils.dumps(
{'result': "ok", 'errorCode': '0', 'errorMsg': None}, indent=2)
return response
def test_rc_send_timeout(self):
methodname = 'POST'
url = '/controller/dc/esdk/v2.0/test_url'
expected_ret = {'errorCode': None, 'reason': None,
'response': None, 'status': -1}
with mock.patch.object(self.restc, 'process_request',
return_value="Timeout Exceptions"):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url, hex(10), {})
self.assertEqual(expected_ret, ret, "Not expected return")
def test_rc_send_success(self):
methodname = 'POST'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': u'0', 'reason': None,
'response': 'ok', 'status': 204}
with mock.patch.object(self.restc,
'process_request',
return_value=self._mock_req_resp
(requests.codes.no_content)):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_send_del_network(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': None, 'reason': None,
'response': None, 'status': 200}
resp = self._mock_req_resp(requests.codes.ok)
resp.content = ""
with mock.patch.object(self.restc, 'process_request',
return_value=resp):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_send_del_network_resp_valid(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
expected_resp = {'errorCode': None, 'reason': None,
'response': None, 'status': 300}
resp = self._mock_req_resp(requests.codes.multiple_choices)
with mock.patch.object(self.restc, 'process_request',
return_value=resp):
ret = ac_rest.RestClient().send(self.host, self.port,
methodname, url,
hex(10),
test_create_network_req)
self.assertEqual(expected_resp, ret, "Not expected response")
def test_rc_process_request(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
"name": "net1"}}
resp = self._mock_req_resp(requests.codes.no_content)
kwargs = {'url': url, 'data': data}
with mock.patch('requests.request',
return_value=resp) as mock_method:
ac_rest.RestClient().process_request(methodname, auth,
url, headers,
data)
mock_method.\
                assert_called_once_with(methodname,
headers={'Content-type':
'application/json',
'Accept':
'application/json'},
timeout=float(cfg.CONF.
huawei_ac_config.
request_timeout),
verify=False,
auth=(cfg.CONF.huawei_ac_config.username,
                                              cfg.CONF.huawei_ac_config.password),
**kwargs)
def test_rc_process_request_timeout_exception(self):
methodname = 'DELETE'
url = '/controller/dc/esdk/v2.0/test_url'
auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
headers = {'Accept': 'application/json',
'Content-type': 'application/json'}
data = {"network": {"routerExternal": False,
"id": "d897e21a-dfd6-4331-a5dd-7524fa421c3e",
"serviceName": "physnet1",
"status": "ACTIVE",
"shared": False,
"adminStateUp": True,
"tenant_id": "test-tenant",
"segmentationId": None,
"physicalNetwork": None,
"networkType": "local",
|
alexander-bzikadze/graph_diff
|
tests/graph/test_graph_with_repetitive_nodes_with_root.py
|
Python
|
apache-2.0
| 1,211
| 0.000826
|
import unittest
from graph_diff.graph import rnr_graph, lr_node
from graph_diff.graph.graph_with_repetitive_nodes_exceptions import GraphWithRepetitiveNodesKeyError
class GraphWithRepetitiveNodesWithRootTest(unittest.TestCase):
def setUp(self):
self.test_graph = rnr_graph()
def test_add_node(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
self.test_graph.add_node(lr_node(1, 1))
self.assertTrue(lr_node(1, 1) in self.test_graph)
def test_add_edge(self):
        self.assertFalse(lr_node(1, 1) in self.test_graph)
self.assertFalse(lr_node(1, 2) in self.test_graph)
self.test_graph.add_edge(lr_node(1, 1), lr_node(1, 2))
self.assertTrue(lr_node(1, 1) in self.test_graph)
self.assertTrue(lr_node(1, 2) in self.test_graph)
def test_add_edge_exp(self):
self.assertFalse(lr_node(1, 1) in self.test_graph)
self.assertFalse(lr_node(1, 2) in self.test_graph)
self.assertRaises(GraphWithRepetitiveNodesKeyError,
self.test_graph.add_edge_exp,
lr_node(1, 1),
lr_node(1, 2))
if __name__ == '__main__':
unittest.main()
|
armon/pypred
|
setup.py
|
Python
|
bsd-3-clause
| 1,228
| 0.002443
|
from setuptools import setup
__version__ = "0.5.0"
# Get the long description by reading the README
try:
readme_content = open("README.md").read()
except:
readme_content = ""
# Create the actual setup method
setup(name='pypred',
version=__version__,
description='A Python library for simple evaluation of natural language predicates',
long_description=readme_content,
author='Armon Dadgar',
author_email='armon@kiip.me',
maintainer='Armon Dadgar',
maintainer_email='armon@kiip.me',
url="https://github.com/armon/pypred/",
license="MIT License",
keywords=["python", "predicate", "natural language"],
packages=['pypred'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries"
],
install_requires=["ply>=3.4"]
)
|
jbedorf/tensorflow
|
tensorflow/python/summary/summary.py
|
Python
|
apache-2.0
| 17,400
| 0.003333
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for writing summary data, for use in analysis and visualization.
See the [Summaries and
TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import json_format as _json_format
# exports Summary, SummaryDescription, Event, TaggedRunMetadata, SessionLog
# pylint: disable=unused-import
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.framework.summary_pb2 import SummaryDescription
from tensorflow.core.framework.summary_pb2 import SummaryMetadata as _SummaryMetadata # pylint: enable=unused-import
from tensorflow.core.util.event_pb2 import Event
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.core.util.event_pb2 import TaggedRunMetadata
# pylint: enable=unused-import
from tensorflow.python.eager import context as _context
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import gen_logging_ops as _gen_logging_ops
from tensorflow.python.ops import gen_summary_ops as _gen_summary_ops # pylint: disable=unused-import
from tensorflow.python.ops import summary_op_util as _summary_op_util
# exports FileWriter, FileWriterCache
# pylint: disable=unused-import
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.summary.writer.writer_cache import FileWriterCache
# pylint: enable=unused-import
from tensorflow.python.util import compat as _compat
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['summary.scalar'])
def scalar(name, tensor, collections=None, family=None):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.scalar_summary(tags=tag, values=tensor, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
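# Usage sketch (hedged; `loss`, `sess`, `writer` and `step` are assumed names,
# not part of this module): in 1.x-style graph code the op is built once and
# its serialized output is handed to a FileWriter, e.g.
#   loss_summary = tf.compat.v1.summary.scalar('loss', loss)
#   writer.add_summary(sess.run(loss_summary), global_step=step)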
@tf_export(v1=['summary.image'])
def image(name, tensor, max_outputs=3, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
  height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
  * If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = _gen_logging_ops.image_summary(
tag=tag, tensor=tensor, max_images=max_outputs, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
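# Worked example of the float normalization rule described in the docstring
# above (hedged; this follows the docstring's wording rather than the kernel
# implementation): for float pixel values [-1.0, 0.0, 2.0], the value 0.0 is
# shifted to 127 and the data is rescaled so the largest value (2.0) lands on
# 255, giving approximately [63, 127, 255].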
@tf_export(v1=['summary.histogram'])
def histogram(name, values, collections=None, family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize your data's
distribution in TensorBoard. You can see a detailed explanation of the
TensorBoard histogram dashboard
[here](https://www.tensorflow.org/get_started/tensorboard_histograms).
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
if _summary_op_util.skip_summary():
return _constant_op.constant('')
with _summary_op_util.summary_scope(
name, family, values=[values],
default_name='HistogramSummary') as (tag, scope):
val = _gen_logging_ops.histogram_summary(
tag=tag, values=values, name=scope)
_summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])
return val
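# Usage sketch (hedged; assumes the default graph already contains summary ops
# such as the scalar/histogram ops above): individual summaries are commonly
# merged and written once per step, e.g.
#   merged = tf.compat.v1.summary.merge_all()
#   writer.add_summary(sess.run(merged), global_step=step)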
@tf_export(v1=['summary.audio'])
def audio(name, tensor, sample_rate, max_outputs=3, collections=None,
family=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
|
Ecotrust/PEW-EFH
|
mp/scenarios/widgets.py
|
Python
|
apache-2.0
| 4,479
| 0.014066
|
from os.path import split
from django import forms
from django.forms.widgets import *
from django.utils.safestring import mark_safe
from madrona.analysistools.widgets import SliderWidget, DualSliderWidget
class AdminFileWidget(forms.FileInput):
"""
A FileField Widget that shows its current value if it has one.
"""
def __init__(self, attrs={}):
super(AdminFileWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
output = []
if value and hasattr(value, "name"):
filename = split(value.name)[-1]
output.append('Current File: <a href="%s" target="_blank">%s</a> : <input style="top:0px;margin-bottom:0px" type="checkbox" name="clear_%s" /> Remove </p>' % (value._get_url(), filename, name))
            output.append('<p> Change:')
output.append(super(AdminFileWidget, self).render(name, value, attrs))
#output.append("</p>")
return mark_safe(u''.join(output))
class SliderWidgetWithTooltip(SliderWidget):
def __init__(self, min, max, step, id):
super(SliderWidgetWithTooltip, self).__init__(min, max, step)
self.id = id
def render(self, *args, **kwargs):
        output = super(SliderWidgetWithTooltip, self).render(*args,**kwargs)
img_id = self.id
span_id = "%s_content" %self.id
#grabbing flatblock outright as including the flatblock template tag in the output html resulted in a literal output of the template tag
from flatblocks.models import FlatBlock
try:
flatblock = str(FlatBlock.objects.get(slug=self.id).content)
except:
flatblock = ""
output = output.replace('\n', ' <img src="/media/marco/img/info.png" id="%s" class="info" />\n' %img_id, 1)
output = output.replace('\n', ' <span id="%s" style="display: none;">%s</span>\n' %(span_id, flatblock), 1)
return mark_safe(output)
class DualSliderWidgetWithTooltip(DualSliderWidget):
def __init__(self, param1, param2, min, max, step, id):
super(DualSliderWidgetWithTooltip, self).__init__(param1, param2, min, max, step)
self.id = id
def render(self, *args, **kwargs):
output = super(DualSliderWidgetWithTooltip, self).render(*args,**kwargs)
output = output.replace('\n', '<img src="/media/marco/img/info.png" id="%s" class="info" />\n' %self.id, 1)
return mark_safe(output)
class CheckboxSelectMultipleWithObjTooltip(forms.CheckboxSelectMultiple):
def __init__(self, queryset=None, attrs=None):
super(CheckboxSelectMultipleWithObjTooltip, self).__init__(attrs)
self.queryset = queryset
self.attrs = attrs
def render(self, *args, **kwargs):
output = super(CheckboxSelectMultipleWithObjTooltip, self).render(*args,**kwargs)
for obj in self.queryset:
output = output.replace(str(obj), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(obj), obj.objective.short_name) )
#print output
return mark_safe(output)
class CheckboxSelectMultipleWithTooltip(forms.CheckboxSelectMultiple):
def __init__(self, queryset=None, substrate=None, attrs=None):
super(CheckboxSelectMultipleWithTooltip, self).__init__(attrs)
self.queryset = queryset
self.substrate = substrate
self.attrs = attrs
def render(self, *args, **kwargs):
output = super(CheckboxSelectMultipleWithTooltip, self).render(*args,**kwargs)
for param in self.queryset:
tidal_substrate = False
try:
if param.parameter.short_name == 'substrate' and self.substrate is None and 'tidal' in self.attrs['class']:
tidal_substrate = True
except:
pass
if param.parameter.short_name == 'substrate' and self.substrate is not None:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(param), self.substrate) )
elif tidal_substrate:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_tidal_substrate" class="info" />' %(str(param)) )
else:
output = output.replace(str(param), '%s <img src="/media/marco/img/info.png" id="info_%s" class="info" />' %(str(param), param.parameter.short_name) )
#print output
return mark_safe(output)
|
wdv4758h/ZipPy
|
edu.uci.python.benchmark/src/benchmarks/sympy/sympy/physics/mechanics/tests/test_functions.py
|
Python
|
bsd-3-clause
| 5,068
| 0.004144
|
from sympy import S, Integral, sin, cos, pi, sqrt, symbols
from sympy.physics.mechanics import (Dyadic, Particle, Point, ReferenceFrame,
RigidBody, Vector)
from sympy.physics.mechanics import (angular_momentum, dynamicsymbols,
inertia, inertia_of_point_mass,
kinetic_energy, linear_momentum, \
outer, potential_energy)
from sympy.physics.mechanics.functions import _mat_inv_mul
from sympy.utilities.pytest import raises
Vector.simp = True
q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
def test_inertia():
N = ReferenceFrame('N')
ixx, iyy, izz = symbols('ixx iyy izz')
ixy, iyz, izx = symbols('ixy iyz izx')
assert inertia(N, ixx, iyy, izz) == (ixx * (N.x | N.x) + iyy *
(N.y | N.y) + izz * (N.z | N.z))
assert inertia(N, 0, 0, 0) == 0 * (N.x | N.x)
assert inertia(N, ixx, iyy, izz, ixy, iyz, izx) == (ixx * (N.x | N.x) +
ixy * (N.x | N.y) + izx * (N.x | N.z) + ixy * (N.y | N.x) + iyy *
(N.y | N.y) + iyz * (N.y | N.z) + izx * (N.z | N.x) + iyz * (N.z |
N.y) + izz * (N.z | N.z))
def test_inertia_of_point_mass():
r, s, t, m = symbols('r s t m')
N = ReferenceFrame('N')
px = r * N.x
I = inertia_of_point_mass(m, px, N)
assert I == m * r**2 * (N.y | N.y) + m * r**2 * (N.z | N.z)
py = s * N.y
I = inertia_of_point_mass(m, py, N)
assert I == m * s**2 * (N.x | N.x) + m * s**2 * (N.z | N.z)
pz = t * N.z
I = inertia_of_point_mass(m, pz, N)
assert I == m * t**2 * (N.x | N.x) + m * t**2 * (N.y | N.y)
p = px + py + pz
I = inertia_of_point_mass(m, p, N)
assert I == (m * (s**2 + t**2) * (N.x | N.x) -
m * r * s * (N.x | N.y) -
m * r * t * (N.x | N.z) -
m * r * s * (N.y | N.x) +
m * (r**2 + t**2) * (N.y | N.y) -
m * s * t * (N.y | N.z) -
m * r * t * (N.z | N.x) -
m * s * t * (N.z | N.y) +
m * (r**2 + s**2) * (N.z | N.z))
def test_linear_momentum():
N = ReferenceFrame('N')
Ac = Point('Ac')
Ac.set_vel(N, 25 * N.y)
I = outer(N.x, N.x)
A = RigidBody('A', Ac, N, 20, (I, Ac))
P = Point('P')
Pa = Particle('Pa', P, 1)
Pa.point.set_vel(N, 10 * N.x)
assert linear_momentum(N, A, Pa) == 10 * N.x + 500 * N.y
def test_angular_momentum_and_linear_momentum():
m, M, l1 = symbols('m M l1')
q1d = dynamicsymbols('q1d')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, q1d * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert linear_momentum(
N, A, Pa) == 2 * m * q1d* l1 * N.y + M * l1 * q1d * N.y
assert angular_momentum(
O, N, A, Pa) == 4 * m * q1d * l1**2 * N.z + q1d * N.z
def test_kinetic_energy():
m, M, l1 = symbols('m M l1')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
    O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert 0 == kinetic_energy(N, Pa, A) - (M*l1**2*omega**2/2
+ 2*l1**2*m*omega**2 + omega**2/2)
def test_potential_energy():
m, M, l1, g, h, H = symbols('m M l1 g h H')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
Pa.set_potential_energy(m * g * h)
A.set_potential_energy(M * g * H)
assert potential_energy(A, Pa) == m * g * h + M * g * H
def test_mat_inv_mul():
# Uses SymPy generated primes as matrix entries, so each entry in
# each matrix should be symbolic and unique, allowing proper comparison.
# Checks _mat_inv_mul against Matrix.inv / Matrix.__mul__.
from sympy import Matrix, prime
# going to form 3 matrices
# 1 n x n
# different n x n
# 1 n x 2n
n = 3
m1 = Matrix(n, n, lambda i, j: prime(i * n + j + 2))
m2 = Matrix(n, n, lambda i, j: prime(i * n + j + 5))
m3 = Matrix(n, n, lambda i, j: prime(i + j * n + 2))
assert _mat_inv_mul(m1, m2) == m1.inv() * m2
assert _mat_inv_mul(m1, m3) == m1.inv() * m3
|
ArchiveTeam/panoramio-discovery
|
discover.py
|
Python
|
unlicense
| 3,093
| 0.00097
|
'''Find valid tags and usernames.
The file will contain things like:
tag:12345:romance
'''
import gzip
import re
import requests
import string
import sys
import time
import random
DEFAULT_HEADERS = {'User-Agent': 'ArchiveTeam'}
class FetchError(Exception):
'''Custom error class when fetching does not meet our expectation.'''
def main():
# Take the program arguments given to this script
# Normal programs use 'argparse' but this keeps things simple
start_num = int(sys.argv[1])
end_num = int(sys.argv[2])
output_filename = sys.argv[3] # this should be something like myfile.txt.gz
assert start_num <= end_num
print('Starting', start_num, end_num)
gzip_file = gzip.GzipFile(output_filename, 'wb')
for shortcode in check_range(start_num, end_num):
# Write the valid result one per line to the file
        line = '{0}\n'.format(shortcode)
gzip_file.write(line.encode('ascii'))
gzip_file.close()
print('Done')
def check_range(start_num, end_num):
'''Check if page exists.
Each line is like tag:12345:romance
'''
for num in range(start_num, end_num + 1):
shortcode = num
url = 'http://www.panoramio.com/user/{0}'.format(shortcode)
counter = 0
while True:
# Try 20 times before giving up
if counter > 20:
# This will stop the script with an error
raise Exception('Giving up!')
try:
text = fetch(url)
except FetchError:
# The server may be overloaded so wait a bit
print('Sleeping... If you see this')
time.sleep(10)
else:
if text:
                    for user in extract_user(text):
                        for tag in extract_tags(text):
                            yield 'tag:{0}:{1}'.format(user, tag)
break # stop the while loop
counter += 1
def fetch(url):
'''Fetch the URL and check if it returns OK.
    If the page exists, returns the response text. Otherwise, returns None.
'''
print('Fetch', url)
response = requests.get(url, headers=DEFAULT_HEADERS)
# response doesn't have a reason attribute all the time??
print('Got', response.status_code, getattr(response, 'reason'))
if response.status_code == 200:
# The item exists
if not response.text:
# If HTML is empty maybe server broke
raise FetchError()
return response.text
elif response.status_code == 404:
# Does not exist
return
else:
# Problem
raise FetchError()
def extract_user(text):
'''Return a list of tags from the text.'''
# Search for <a href="/user/1707816/tags/Bell%27Italia">Bell'Italia</a>
return re.findall(r'"/user/([^/]+)/tags/', text)
def extract_tags(text):
'''Return a list of tags from the text.'''
# Search for <a href="/user/1707816/tags/Bell%27Italia">Bell'Italia</a>
return re.findall(r'"/user/[0-9]+/tags/([^"]+)"', text)
if __name__ == '__main__':
main()
|
wilblack/lilybot
|
rpi_client/bot_roles/local_settings_generic.py
|
Python
|
gpl-2.0
| 242
| 0.028926
|
"""
Router.py uses bot_packages in this file to setup command and sensor value routing to the correct bot_role.
"""
settings= {
"bot_name":"rp4.solalla.
|
ardyh",
"bot_roles":"bot",
"bot_packag
|
es":[],
"subscriptions":[],
}
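# Example of a filled-in configuration (hedged; the package and subscription
# names below are invented for illustration and are not part of this project):
# settings = {
#     "bot_name": "rp4.solalla.ardyh",
#     "bot_roles": "bot",
#     "bot_packages": ["example_package"],
#     "subscriptions": ["example/topic"],
# }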
|
Camiloasc1/AstronomyUNAL
|
CelestialMechanics/kepler/constants.py
|
Python
|
mit
| 503
| 0
|
from astropy import units as u
K_kepler = 0.01720209895 # ua^(3/2) m_{sun} d^(−1)
K = 0.01720209908 * u.au ** (3 / 2) / u.d # ua^(3/2) d^(−1)
UA = 149597870700 * u.m # m
GM1 = 1.32712442099E20 * u.m ** 3 / u.s ** 2 # m^(3) s^(−2)
# m1/m2
Mercury = 6023600
Venus = 408523.719
Earth_Moon = 328900.561400
Mars = 3098703.59
Jupiter = 1047.348644
Saturn = 3497.9018
Uranus = 22902.98
Neptune = 19412.26
Pluto = 136566000
Eris = 119100000
Ceres = 2119000000
Palas = 9700000000
Vesta = 7400000000
|
umitproject/network-admin
|
netadmin/utils/charts/charttools.py
|
Python
|
agpl-3.0
| 2,781
| 0.010068
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from core import Chart, ChartColumn
CHART_TOOLS_PACKAGES = ['corechart', 'gauge', 'geochart', 'table', \
'treemap', 'annotatedtimeline']
class InvalidChartsPackage(Exception):
pass
class DatatableColumn(ChartColumn):
"""
"""
type_name = ''
def __init__(self, name, data):
self.name = name
self._data = data
def format(self, value):
return value
def get_data(self):
return [self.format(value) for value in self._data]
data = property(get_data)
class NumberColumn(DatatableColumn):
type_name = 'number'
class StringColumn(DatatableColumn):
type_name = 'string'
def format(self, value):
return "'%s'" % value
class DateColumn(DatatableColumn):
type_name = 'date'
def format(self, value):
return 'new Date(%i, %i, %i)' % \
(value.year, value.month, value.day)
class DatetimeColumn(DatatableColumn):
type_name = 'datetime'
def format(self, value):
return 'new Date(%i, %i, %i, %i, %i, %i)' % \
(value.year, value.month, value.day,
value.hour, value.minute, value.second)
class ChartToolsChart(Chart):
"""
"""
chart_type = ''
def add_column(self, name, data, column_class):
col = column_class(name, data)
self.columns.append(col)
return col
def num_rows(self):
if self.columns:
# we assume that all columns have the same length
return len(self.columns[0])
return 0
class LineChart(ChartToolsChart):
chart_type = 'LineChart'
class ColumnChart(ChartToolsChart):
chart_type = 'ColumnChart'
class ScatterChart(ChartToolsChart):
chart_type = 'ScatterChart'
class AnnotatedTimeLine(ChartToolsChart):
chart_type = 'AnnotatedTimeLine'
class PieChart(ChartToolsChart):
chart_type = 'PieChart'
|
LBenzahia/cltk
|
cltk/stem/akkadian/stem.py
|
Python
|
mit
| 2,502
| 0.000402
|
"""
Get the stem of a word, given a declined form and its gender.
TODO: Check this logic with von Soden's Grundriss der akkadischen Grammatik.
TODO: Deal with j/y issue.
"""
__author__ = ['M. Willis Monroe <willismonroe@gmail.com>']
__license__ = 'MIT License. See LICENSE.'
ENDINGS = {
'm': {
'singular': {
'nominative': 'um',
'accusative': 'am',
'genitive': 'im'
},
'dual': {
'nominative': 'ān',
'oblique': 'īn'
},
'plural': {
'nominative': 'ū',
'oblique': 'ī'
}
},
'f': {
'singular': {
'nominative': 'tum',
'accusative': 'tam',
'genitive': 'tim'
},
'dual': {
'nominative': 'tān',
'oblique': 'tīn'
},
'plural': {
'nominative': ['ātum', 'ētum', 'ītum'],
'oblique': ['ātim', 'ētim', 'ītum']
}
}
}
class Stemmer(object):
"""Stem Akkadian words with a simple algorithm based on Huehnergard"""
def __init__(self):
self.endings = ENDINGS
def get_stem(self, noun, gender, mimation=True):
"""Return the stem of a noun, given its gender"""
stem = ''
if mimation and noun[-1:] == 'm':
# noun = noun[:-1]
pass
# Take off ending
if gender == 'm':
if noun[-2:] in list(self.endings['m']['singular'].values()) + \
list(self.endings['m']['dual'].values()):
stem = noun[:-2]
elif noun[-1] in list(self.endings['m']['plural'].values()):
stem = noun[:-1]
else:
print("Unknown masculine noun: {}".format(noun))
elif gender == 'f':
if noun[-4:] in self.endings['f']['plural']['nominative'] + \
self.endings['f']['plural']['oblique']:
stem = noun[:-4] + 't'
elif noun[-3:] in list(self.endings['f']['singular'].values()) + \
list(self.endings['f']['dual'].values()):
stem = noun[:-3] + 't'
            elif noun[-2:] in list(self.endings['m']['singular'].values()) + \
list(self.endings['m']['dual'].values()):
stem = noun[:-2]
else:
print("Unknown feminine noun: {}".format(noun))
else:
print("Unknown noun: {}".
|
format(noun))
return stem
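# Usage sketch (hedged; the example word is assumed, not taken from this module):
#   >>> Stemmer().get_stem('šarrum', 'm')
#   'šarr'
# The masculine nominative singular ending '-um' is recognised and stripped.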
|
CtopCsUtahEdu/chill-dev
|
examples/chill/testcases/include.script.py
|
Python
|
gpl-3.0
| 129
| 0.007752
|
from chill import *
source('include.c')
destination('includemodified.c')
procedure('main')
loop(0)
original()
print_code()
|
doctorrabb/badtheme
|
detector.py
|
Python
|
gpl-3.0
| 949
| 0.036881
|
#!/usr/bin/python
from sys import argv
from modules.helpers.wpdetector import WordpressDetector
from modules.net.scan import is_good_response
from modules.const import ERR, NO, OK, INFO
def main ():
if len (argv) > 1:
print INFO + 'Checking site...'
if not is_good_response (argv [1]):
            print ERR + 'Site is unavailable! :('
exit (-1)
print INFO + 'Detecting wordpress...'
wpd = WordpressDetector (argv [1])
if wpd.detect_by_pages ():
print OK + 'Wordpress Detected!'
if raw_input ('Try to detect Wordpress version? (y/n): ') == 'y':
print INFO + 'Detecting Wordpress version...'
dec = wpd.detect_version ()
if dec is not None:
print OK + 'Wordpress Version Detected!' + dec
else:
print NO + 'Wordpress version getting failed!'
exit (0)
else:
print NO + 'This is not Wordpress! :('
else:
print ERR + 'Example: ./detector.py http://blabla.com'
if __name__ == '__main__':
main ()
|
steinarvk/rigour
|
rigour/tests/test_secrecy.py
|
Python
|
apache-2.0
| 1,198
| 0.012521
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from rigour.errors import ValidationFailed
from rigour.types import *
from rigour.constraints import length_between
import rigour
import pytest
def test_secrecy_declared_before():
t = String().secret().constrain(length_between(4,6))
with pytest.raises(ValidationFailed) as excinfo:
t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
def test_secrecy_declared_after():
t = String().constrain(length_between(4,6)).secret()
with pytest.raises(ValidationFailed) as excinfo:
t.check("xxx")
message = str(excinfo)
assert "xxx" not in message
|
hectorsanchez/acheckersgame
|
intro2.py
|
Python
|
gpl-2.0
| 274
| 0.007299
|
import pygame
import intro
import game
class Intro2(intro.Intro):
def load_image(self):
        self.fondo = pygame.image.load('ima/intro2.png').convert()
    def go_to_next(self):
new_scene = game.Game(self.world)
self.world.change_scene(new_scene)
|
jacksonwilliams/arsenalsuite
|
cpp/lib/PyQt4/examples/tutorial/t3.py
|
Python
|
gpl-2.0
| 367
| 0
|
#!/usr/bin/env python
# PyQt tutorial 3
import sys
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
window.resize(200, 120)
quit = QtGui.QPushButton("Quit", window)
quit.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
quit.setGeometry(10, 40, 180, 40)
quit.clicked.connect(app.quit)
window.show()
sys.exit(app.exec_())
|
ucsb-seclab/ictf-framework
|
scoring_ictf/scoring_ictf/game_state_interface.py
|
Python
|
gpl-2.0
| 943
| 0
|
class GameStateInterface(object):
def __init__(self):
self._team_ids_to_names = None
self._service_ids_to_names = None
def _team_id_to_name_map(self):
        raise NotImplementedError
|
def _service_id_to_name_map(self):
raise NotImplementedError
def _scored_events_for_tick(self, tick):
raise NotImplementedError
@property
def team_id_to_name_map(self):
if self._team_ids_to_names is None:
            self._team_ids_to_names = self._team_id_to_name_map()
|
return self._team_ids_to_names
@property
def service_id_to_name_map(self):
if self._service_ids_to_names is None:
self._service_ids_to_names = self._service_id_to_name_map()
return self._service_ids_to_names
def scored_events_for_tick(self, tick):
# TODO: maybe cache here? or do we cache in the database side?
return self._scored_events_for_tick(tick)
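# Editor's note: the class below is an illustrative sketch added for this write-up,
# not part of the original module. It shows one minimal way a backend could satisfy
# the interface; the InMemoryGameState name and the example dict shapes are assumptions.
class InMemoryGameState(GameStateInterface):
    def __init__(self, team_names, service_names, events_by_tick):
        super(InMemoryGameState, self).__init__()
        self._teams = team_names        # e.g. {1: "Team Alpha", 2: "Team Beta"}
        self._services = service_names  # e.g. {1: "notes", 2: "uploads"}
        self._events = events_by_tick   # e.g. {0: [{"team_id": 1, "service_id": 2}]}
    def _team_id_to_name_map(self):
        return self._teams
    def _service_id_to_name_map(self):
        return self._services
    def _scored_events_for_tick(self, tick):
        return self._events.get(tick, [])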
|
turbokongen/home-assistant
|
tests/components/wemo/entity_test_helpers.py
|
Python
|
apache-2.0
| 5,991
| 0.002838
|
"""Test cases that are in common among wemo platform modules.
This is not a test module. These test methods are used by the platform test modules.
"""
import asyncio
import threading
from unittest.mock import patch
from pywemo.ouimeaux_device.api.service import ActionException
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_UNAVAILABLE
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
def _perform_registry_callback(hass, pywemo_registry, pywemo_device):
"""Return a callable method to trigger a state callback from the device."""
@callback
def async_callback():
# Cause a state update callback to be triggered by the device.
pywemo_registry.callbacks[pywemo_device.name](pywemo_device, "", "")
return hass.async_block_till_done()
return async_callback
def _perform_async_update(hass, wemo_entity):
"""Return a callable method to cause hass to update the state of the entity."""
@callback
def async_callback():
return hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
return async_callback
async def _async_multiple_call_helper(
hass,
pywemo_registry,
wemo_entity,
pywemo_device,
call1,
call2,
update_polling_method=None,
):
"""Create two calls (call1 & call2) in parallel; verify only one polls the device.
The platform entity should only perform one update poll on the device at a time.
Any parallel updates that happen at the same time should be ignored. This is
verified by blocking in the update polling method. The polling method should
only be called once as a result of calling call1 & call2 simultaneously.
"""
# get_state is called outside the event loop. Use non-async Python Event.
event = threading.Event()
def get_update(force_update=True):
event.wait()
update_polling_method = update_polling_method or pywemo_device.get_state
update_polling_method.side_effect = get_update
# One of these two calls will block on `event`. The other will return right
# away because the `_update_lock` is held.
_, pending = await asyncio.wait(
[call1(), call2()], return_when=asyncio.FIRST_COMPLETED
)
# Allow the blocked call to return.
event.set()
if pending:
await asyncio.wait(pending)
# Make sure the state update only happened once.
update_polling_method.assert_called_once()
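# Editor's note: the snippet below is an illustrative, self-contained sketch added for
# this write-up, not part of the original helpers. It shows the update-lock pattern the
# helper above verifies: of two racing update calls, only the one holding the lock polls
# the device. FakeEntity and _locking_demo are invented names; the snippet reuses the
# module's `import asyncio`.
class FakeEntity:
    def __init__(self):
        self._update_lock = asyncio.Lock()
        self.poll_count = 0
    async def async_update(self):
        if self._update_lock.locked():
            return  # an update is already in flight; skip instead of polling again
        async with self._update_lock:
            self.poll_count += 1  # stands in for polling the physical device
            await asyncio.sleep(0)  # yield so a racing call can observe the lock
async def _locking_demo():
    entity = FakeEntity()
    await asyncio.gather(entity.async_update(), entity.async_update())
    assert entity.poll_count == 1  # only one of the two racing calls polled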
async def test_async_update_locked_callback_and_update(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that a callback and a state update request can't both happen at the same time.
When a state update is received via a callback from the device at the same time
as hass is calling `async_update`, verify that only one of the updates proceeds.
"""
await async_setup_component(hass, HA_DOMAIN, {})
callback = _perform_registry_callback(hass, pywemo_registry, pywemo_device)
update = _perform_async_update(hass, wemo_entity)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, callback, update, **kwargs
)
async def test_async_update_locked_multiple_updates(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that two hass async_update state updates do not pr
|
oceed at the same time."""
await async_setup_component(hass, HA_DOMAIN, {})
update = _perform_async_update(hass, wemo_entity)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, update, update, **kwargs
)
async def test_async_update_locked_multiple_callbacks(
hass, pywemo_registry, wemo_entity, pywemo_device, **kwargs
):
"""Test that two device callback s
|
tate updates do not proceed at the same time."""
await async_setup_component(hass, HA_DOMAIN, {})
callback = _perform_registry_callback(hass, pywemo_registry, pywemo_device)
await _async_multiple_call_helper(
hass, pywemo_registry, wemo_entity, pywemo_device, callback, callback, **kwargs
)
async def test_async_locked_update_with_exception(
hass, wemo_entity, pywemo_device, update_polling_method=None
):
"""Test that the entity becomes unavailable when communication is lost."""
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
await async_setup_component(hass, HA_DOMAIN, {})
update_polling_method = update_polling_method or pywemo_device.get_state
update_polling_method.side_effect = ActionException
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_UNAVAILABLE
async def test_async_update_with_timeout_and_recovery(hass, wemo_entity, pywemo_device):
"""Test that the entity becomes unavailable after a timeout, and that it recovers."""
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
await async_setup_component(hass, HA_DOMAIN, {})
with patch("async_timeout.timeout", side_effect=asyncio.TimeoutError):
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_UNAVAILABLE
# Check that the entity recovers and is available after the update succeeds.
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
|
Hofsmo/psse_models
|
setup.py
|
Python
|
gpl-3.0
| 223
| 0.004484
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 14:10:41 2016
@author: sigurdja
|
"""
from setuptools import setup, find_packages
setup(
name="
|
psse_models",
version="0.1",
packages=find_packages(),
)
|
strfry/OpenNFB
|
protocols/2_ch_c3beta_c4smr_kro.py
|
Python
|
gpl-3.0
| 3,567
| 0.024951
|
# Thanks to Kurt Othmer for BioExplorer design this is translated from
from flow import *
class Flow(object):
def init(self, context):
ch1 = context.get_channel('Channel 1')
#ch1 = Notch(50, input=ch1)
ch1_dc = DCBlock(ch1).ac
ch1_raw = BandPass(0.0, 40.0, input=ch1_dc)
ch1_theta = BandPass(3.0, 7.0, input=ch1_raw, type='elliptic', order=3).output
        ch1_beta = BandPass(15.0, 18.0, input=ch1_raw, type='elliptic', order=3).output
ch1_hibeta = BandPass(22, 38.0, input=ch1_raw, type='elliptic', order=3).output
ch1_raw.set(label='Left Raw: 0-40', color='white')
ch1_theta.set(label='Left Theta', color='violet')
ch1_beta.set(label='Left Beta', color='green')
ch1_hibeta.set(label='Left Hi Beta', color='yellow')
self.ch1_theta_threshold = Threshold('L Theta', input=RMS(ch1_theta), mode='decrease', auto_target=90)
self.ch1_beta_threshold = Threshold('L Beta', input=RMS(ch1_beta), mode='range', low_target=90, high_target=95)
self.ch1_hibeta_threshold = Threshold('L Hi-Beta', input=RMS(ch1_hibeta), mode='decrease', auto_target=95)
self.ch1_osci = Oscilloscope('Left Side', moving=False,
channels=[ch1_raw, ch1_theta, ch1_beta, ch1_hibeta])
self.left_spectrum = BarSpectrogram('Left', lo=2.0, hi=30.0, input=ch1_raw, align='right')
ch2 = context.get_channel('Channel 2')
#ch2 = Notch(50, input=ch2)
ch2_dc = DCBlock(ch2).ac
ch2_raw = BandPass(0.0, 40.0, input=ch2_dc)
ch2_theta = BandPass(3.0, 7.0, input=ch2_raw, type='elliptic', order=3).output
        ch2_smr = BandPass(12.0, 15.0, input=ch2_raw, type='elliptic', order=3).output
ch2_hibeta = BandPass(22, 38.0, input=ch2_raw, type='elliptic', order=3).output
ch2_raw.set(label='Right Raw: 0-40', color='white')
ch2_theta.set(label='Right Theta', color='violet')
ch2_smr.set(label='Right SMR', color='blue')
ch2_hibeta.set(label='Right Hi Beta', color='yellow')
self.ch2_theta_threshold = Threshold('R Theta', input=RMS(ch2_theta), mode='decrease', auto_target=90)
self.ch2_smr_threshold = Threshold('R SMR', input=RMS(ch2_smr), mode='range', low_target=90, high_target=95)
self.ch2_hibeta_threshold = Threshold('R Hi-Beta', input=RMS(ch2_hibeta), mode='decrease', auto_target=95)
self.ch2_osci = Oscilloscope('Right Side', moving=False,
channels=[ch2_raw, ch2_theta, ch2_smr, ch2_hibeta])
self.right_spectrum = BarSpectrogram('Right', lo=2.0, hi=30.0, input=ch2_raw, align='left')
and_cond = Expression(lambda *args: all(args),
self.ch1_theta_threshold.passfail, self.ch1_beta_threshold.passfail, self.ch1_hibeta_threshold.passfail,
#self.ch2_theta_threshold.passfail, self.ch2_smr_threshold.passfail, self.ch2_hibeta_threshold.passfail
)
video_path = '/Users/jonathansieber/Movies/Adventure.Time.S06E22.The.Cooler.720p.HDTV.x264-W4F.mkv'
self.video = MPlayerControl(video_path, enable=and_cond)
def widget(self):
w = QtGui.QWidget()
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.addWidget(self.ch1_osci.widget(), 0, 0, 1, 4)
layout.addWidget(self.ch1_theta_threshold.widget(), 1, 0)
|
layout.addWidget(self.ch1_beta_threshold.widget(), 1, 1)
layout.addWidget(self.ch1_hibeta_threshold.widget(), 1, 2)
layout.addWidget(self.left_spectrum.widget(), 1, 3)
layout.addWidget(self.ch2_osci.widget(), 0, 4, 1, 4)
layout.addWidget(self.ch2_theta_threshold.widget(), 1, 5)
        layout.addWidget(self.ch2_smr_threshold.widget(), 1, 6)
|
layout.addWidget(self.ch2_hibeta_threshold.widget(), 1, 7)
layout.addWidget(self.right_spectrum.widget(), 1, 4)
return w
def flow():
return Flow()
|
jgomezdans/grabba_grabba_hey
|
grabba_grabba_hey/sentinel3_downloader.py
|
Python
|
gpl-2.0
| 9,042
| 0.003981
|
#!/usr/bin/env python
"""
A simple interface to download Sentinel-1 and Sentinel-2 datasets from
the COPERNICUS Sentinel Hub.
"""
from functools import partial
import hashlib
import os
import datetime
import sys
import xml.etree.cElementTree as ET
import re
import requests
from concurrent import futures
import logging
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
# hub_url = "https://scihub.copernicus.eu/dhus/search?q="
#hub_url = "https://scihub.copernicus.eu/s3hub/search?q="
hub_url= "https://scihub.copernicus.eu/apihub/search?q="
requests.packages.urllib3.disable_warnings()
def calculate_md5(fname):
hasher = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest().upper()
def do_query(query, user="guest", passwd="guest"):
"""
A simple function to pass a query to the Sentinel scihub website. If
successful this function will return the XML file back for further
processing.
query: str
A query string, such as "https://scihub.copernicus.eu/dhus/odata/v1/"
"Products?$orderby=IngestionDate%20desc&$top=100&$skip=100"
Returns:
The relevant XML file, or raises error
"""
    r = requests.get(query, auth=(user, passwd), verify=False)
|
if r.status_code == 200:
return r.text
else:
raise IOError("Something went wrong! Error code %d" % r.status_code)
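# Editor's note: the helper below is an illustrative sketch added for this write-up and
# is not part of the original module. It shows how do_query and parse_xml (defined
# further down) are meant to be combined; the query text is an assumption modelled on
# the query assembled in download_sentinel.
def _example_sentinel3_query(user, passwd):
    query = (hub_url + 'platformname:Sentinel-3 AND '
             'beginposition:[2016-01-01T00:00:00Z TO NOW]')
    xml_text = do_query(query, user=user, passwd=passwd)
    return parse_xml(xml_text)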
def download_product(source, target, user="guest", passwd="guest"):
"""
Download a product from the SentinelScihub site, and save it to a named
local disk location given by ``target``.
|
source: str
A product fully qualified URL
target: str
A filename where to download the URL specified
"""
md5_source = source.replace("$value", "/Checksum/Value/$value")
r = requests.get(md5_source, auth=(user, passwd), verify=False)
md5 = r.text
if os.path.exists(target):
md5_file = calculate_md5(target)
if md5 == md5_file:
return
chunks = 1048576 # 1MiB...
while True:
LOG.debug("Getting %s" % source)
r = requests.get(source, auth=(user, passwd), stream=True,
verify=False)
if not r.ok:
raise IOError("Can't start download... [%s]" % source)
file_size = int(r.headers['content-length'])
LOG.info("Downloading to -> %s" % target)
LOG.info("%d bytes..." % file_size)
with open(target, 'wb') as fp:
cntr = 0
dload = 0
for chunk in r.iter_content(chunk_size=chunks):
if chunk:
cntr += 1
if cntr > 100:
dload += cntr * chunks
LOG.info("\tWriting %d/%d [%5.2f %%]" % (dload, file_size,
100. * float(dload) /
float(file_size)))
sys.stdout.flush()
cntr = 0
fp.write(chunk)
fp.flush()
os.fsync(fp)
md5_file = calculate_md5(target)
if md5_file == md5:
break
return
def parse_xml(xml):
"""
    Parse an OData XML file to harvest some relevant information re products
    available and so on. It will return a list of dictionaries, with one
    dictionary per product returned from the query. Each dictionary will have a
    number of keys (see ``fields_of_interest``), as well as ``link`` and
    ``quicklook`` entries.
"""
fields_of_interest = ["filename", "identifier", "instrumentshortname",
"orbitnumber", "orbitdirection", "producttype",
"beginposition", "endposition"]
tree = ET.ElementTree(ET.fromstring(xml))
# Search for all the acquired images...
granules = []
for elem in tree.iter(tag="{http://www.w3.org/2005/Atom}entry"):
granule = {}
for img in elem.getchildren():
if img.tag.find("id") >= 0:
granule['id'] = img.text
if img.tag.find("link") and img.attrib.has_key("href"):
if img.attrib['href'].find("Quicklook") >= 0:
granule['quicklook'] = img.attrib['href']
elif img.attrib['href'].find("$value") >= 0:
granule['link'] = img.attrib['href'].replace("$value", "")
if img.attrib.has_key("name"):
if img.attrib['name'] in fields_of_interest:
granule[img.attrib['name']] = img.text
granules.append(granule)
return granules
# print img.tag, img.attrib, img.text
# for x in img.getchildren():
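# Editor's note: the constant below is an illustrative addition for this write-up, not
# part of the original module. It sketches the shape of one entry in the list returned
# by parse_xml; every value is an invented placeholder.
_EXAMPLE_GRANULE = {
    "id": "00000000-0000-0000-0000-000000000000",
    "link": "https://scihub.copernicus.eu/apihub/odata/v1/Products('...')/",
    "quicklook": "https://scihub.copernicus.eu/apihub/odata/v1/Products('...')/Products('Quicklook')/$value",
    "filename": "S3A_OL_1_EFR____20160101T000000_example.SAFE",
    "identifier": "S3A_OL_1_EFR____20160101T000000_example",
    "producttype": "OL_1_EFR___",
    "beginposition": "2016-01-01T00:00:00.000Z",
    # ...plus the remaining fields_of_interest keys when present in the XML.
}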
def download_sentinel(location, input_start_date, input_sensor, output_dir,
input_end_date=None, username="guest", password="guest"):
input_sensor = input_sensor.upper()
sensor_list = ["S1", "S2", "S3"]
if not input_sensor in sensor_list:
raise ValueError("Sensor can only be S1, S2 or S3. You provided %s"
% input_sensor)
else:
if input_sensor.upper() == "S1":
sensor = "Sentinel-1"
elif input_sensor.upper() == "S2":
sensor = "Sentinel-2"
elif input_sensor.upper() == "S3":
sensor= "Sentinel-3"
sensor_str = 'platformname:%s' % sensor
#sensor_str = 'filename:%s' % input_sensor.upper()
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y-%m-%d").isoformat()
except ValueError:
start_date = datetime.datetime.strptime(input_start_date,
"%Y/%j").isoformat()
start_date = start_date + "Z"
if input_end_date is None:
end_date = "NOW"
else:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y-%m-%d").isoformat()
except ValueError:
end_date = datetime.datetime.strptime(input_end_date,
"%Y/%j").isoformat()
if len(location) == 2:
location_str = 'footprint:"Intersects(%f, %f)"' % (location[0], location[1])
elif len(location) == 4:
location_str = 'footprint:"Intersects( POLYGON(( " + \
"%f %f, %f %f, %f %f, %f %f, %f %f) ))"' % (
location[0], location[0],
location[0], location[1],
location[1], location[1],
location[1], location[0],
location[0], location[0])
time_str = 'beginposition:[%s TO %s]' % (start_date, end_date)
query = "%s AND %s AND %s" % (location_str, time_str, sensor_str)
query = "%s%s" % (hub_url, query)
# query = "%s%s" % ( hub_url, urllib2.quote(query ) )
LOG.debug(query)
    # import pdb; pdb.set_trace()  # debug breakpoint (disabled)
result = do_query(query, user=username, passwd=password)
granules = parse_xml(result)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
ret_files = []
for granule in granules:
download_product(granule['link'] + "$value", os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")),
user=username, passwd=password)
ret_files.append(os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")))
return granules, ret_files
if __name__ == "__main__": # location = (43.3650,
|
turbulenz/turbulenz_tools
|
turbulenz_tools/tools/exportevents.py
|
Python
|
mit
| 25,568
| 0.003559
|
#!/usr/bin/env python
# Copyright (c) 2012-2013 Turbulenz Limited
from logging import basicConfig, CRITICAL, INFO, WARNING
import argparse
from urllib3 import connection_from_url
from urllib3.exceptions import HTTPError, SSLError
from simplejson import loads as json_loads, dump as json_dump
from gzip import GzipFile
from zlib import decompress as zlib_decompress
from time import strptime, strftime, gmtime
from calendar import timegm
from re import compile as re_compile
from sys import stdin, argv
from os import mkdir
from os.path import exists as path_exists, join as path_join, normpath
from getpass import getpass, GetPassWarning
from base64 import urlsafe_b64decode
__version__ = '2.1.2'
__dependencies__ = []
HUB_COOKIE_NAME = 'hub'
HUB_URL = 'https://hub.turbulenz.com/'
DATATYPE_DEFAULT = 'events'
DATATYPE_URL = { 'events': '/dynamic/project/%s/event-log',
'users': '/dynamic/project/%s/user-info' }
DAY = 86400
TODAY_START = (timegm(gmtime()) / DAY) * DAY
# pylint: disable=C0301
USERNAME_PATTERN = re_compile('^[a-z0-9]+[a-z0-9-]*$') # usernames
PROJECT_SLUG_PATTERN = re_compile('^[a-zA-Z0-9\-]*$') # game
# pylint: enable=C0301
class DateRange(object):
"""Maintain a time range between two dates. If only a start time is given it will generate a 24 hour period
starting at that time. Defaults to the start of the current day if no times are given"""
def __init__(self, start=TODAY_START, end=None):
self.start = start
if end:
self.end = end
else:
self.end = start + DAY
if self.start > self.end:
raise ValueError('Start date can\'t be greater than the end date')
def _range_str(t):
if t % DAY:
return strftime('%Y-%m-%d %H:%M:%SZ', gmtime(t))
else:
return strftime('%Y-%m-%d', gmtime(t))
self.start_str = _range_str(self.start)
if self.end % DAY:
self.end_str = _range_str(self.end)
else:
self.end_str = _range_str(self.end - DAY)
def filename_str(self):
if self.start_str == self.end_str:
return self.start_str
elif int(self.start / DAY) == int(self.end / DAY):
result = '%s_-_%s' % (strftime('%Y-%m-%d %H:%M:%SZ', gmtime(self.start)),
strftime('%Y-%m-%d %H:%M:%SZ', gmtime(self.end)))
return result.replace(' ', '_').replace(':', '-')
else:
result = '%s_-_%s' % (self.start_str, self.end_str)
return result.replace(' ', '_').replace(':', '-')
@staticmethod
def parse(range_str):
date_format = '%Y-%m-%d'
range_parts = range_str.split(':')
if len(range_parts) < 1:
error('Date not set')
exit(1)
elif len(range_parts) > 2:
error('Can\'t provide more than two dates for date range')
exit(1)
try:
start = int(timegm(strptime(range_parts[0], date_format)))
end = None
if len(range_parts) == 2:
end = int(timegm(strptime(range_parts[1], date_format))) + DAY
except ValueError:
error('Dates must be in the yyyy-mm-dd format')
exit(1)
return DateRange(start, end)
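# Editor's note: the helper below is an illustrative usage sketch added for this
# write-up, not part of the original tool; the dates are arbitrary examples.
def _daterange_examples():
    one_day = DateRange.parse('2013-05-01')              # midnight to midnight UTC
    one_week = DateRange.parse('2013-05-01:2013-05-07')  # whole days, end date inclusive
    return one_day.filename_str(), one_week.filename_str()
    # -> ('2013-05-01', '2013-05-01_-_2013-05-07')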
def log(message, new_line=True):
|
print '\r >> %s' % message,
if new_line:
print
def error(message):
log('[ERROR] - %s' % message)
def warning(message):
log('[WARNING] - %s' % message)
def _parse_args():
parser = argparse.ArgumentParser(description="Export event logs and anonymised user information of a game.")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose output")
parser.add_argument("-s", "--silent", action="stor
|
e_true", help="silent running")
parser.add_argument("--version", action='version', version=__version__)
parser.add_argument("-u", "--user", action="store",
help="Hub login username (will be requested if not provided)")
parser.add_argument("-p", "--password", action="store",
help="Hub login password (will be requested if not provided)")
parser.add_argument("-t", "--type", action="store", default=DATATYPE_DEFAULT,
help="type of data to download, either events or users (defaults to " + DATATYPE_DEFAULT + ")")
parser.add_argument("-d", "--daterange", action="store", default=TODAY_START,
help="individual 'yyyy-mm-dd' or range 'yyyy-mm-dd : yyyy-mm-dd' of dates to get the data " \
"for (defaults to today)")
parser.add_argument("-o", "--outputdir", action="store", default="",
help="folder to output the downloaded files to (defaults to current directory)")
parser.add_argument("-w", "--overwrite", action="store_true",
help="if a file to be downloaded exists in the output directory, " \
"overwrite instead of skipping it")
parser.add_argument("--indent", action="store_true", help="apply indentation to the JSON output")
parser.add_argument("--hub", default=HUB_URL, help="Hub url (defaults to https://hub.turbulenz.com/)")
parser.add_argument("project", metavar='project_slug', help="Slug of Hub project you wish to download from")
args = parser.parse_args(argv[1:])
if args.silent:
basicConfig(level=CRITICAL)
elif args.verbose:
basicConfig(level=INFO)
else:
basicConfig(level=WARNING)
if not PROJECT_SLUG_PATTERN.match(args.project):
error('Incorrect "project" format')
exit(-1)
username = args.user
if not username:
print 'Username: ',
username = stdin.readline()
if not username:
error('Login information required')
exit(-1)
username = username.strip()
args.user = username
if not USERNAME_PATTERN.match(username):
error('Incorrect "username" format')
exit(-1)
if not args.password:
try:
args.password = getpass()
except GetPassWarning:
error('Echo free password entry unsupported. Please provide a --password argument')
return -1
if args.type not in ['events', 'users']:
error('Type must be one of \'events\' or \'users\'')
exit(1)
if isinstance(args.daterange, int):
args.daterange = DateRange(args.daterange)
else:
args.daterange = DateRange.parse(args.daterange)
return args
def login(connection, options):
username = options.user
password = options.password
if not options.silent:
log('Login as "%s".' % username)
credentials = {'login': username,
'password': password,
'source': '/tool'}
try:
r = connection.request('POST',
'/dynamic/login',
fields=credentials,
retries=1,
redirect=False)
except (HTTPError, SSLError):
error('Connection to Hub failed!')
exit(-1)
if r.status != 200:
if r.status == 301:
redirect_location = r.headers.get('location', '')
end_domain = redirect_location.find('/dynamic/login')
error('Login is being redirected to "%s". Please verify the Hub URL.' % redirect_location[:end_domain])
else:
error('Wrong user login information!')
exit(-1)
cookie = r.headers.get('set-cookie', None)
login_info = json_loads(r.data)
# pylint: disable=E1103
if not cookie or HUB_COOKIE_NAME not in cookie or login_info.get('source') != credentials['source']:
error('Hub login failed!')
exit(-1)
# pylint: enable=E1103
return cookie
def logout(connection, cookie):
try:
connection.request('POST',
'/dynamic/logout',
headers={'Cookie': cookie},
redirect=False)
except (HTTPError, SSLError) as e:
e
|
nttks/jenkins-test
|
lms/djangoapps/bulk_email/tests/test_tasks.py
|
Python
|
agpl-3.0
| 24,083
| 0.003737
|
"""
Unit tests for LMS instructor-initiated background tasks.
Runs tasks on answers to course problems to validate that code
paths actually work.
"""
import json
from uuid import uuid4
from itertools import cycle, chain, repeat
from mock import patch, Mock
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPAuthenticationError
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from boto.exception import AWSConnectionError
from celery.states import SUCCESS, FAILURE
from django.conf import settings
from django.core.management import call_command
from bulk_email.models import CourseEmail, Optout, SEND_TO_ALL, SEND_TO_ALL_INCLUDE_OPTOUT
from bulk_email.tasks import _filter_optouts_from_recipients
from instructor_task.tasks import send_bulk_course_email
from instructor_task.subtasks import update_subtask_status, SubtaskStatus
from instructor_task.models import InstructorTask
from instructor_task.tests.test_base import InstructorTaskCourseTestCase
from instructor_task.tests.factories import InstructorTaskFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.models import UserStanding
class TestTaskFailure(Exception):
"""Dummy exception used for unit tests."""
pass
def my_update_subtask_status(entry_id, current_task_id, new_subtask_status):
"""
Check whether a subtask has been updated before really updating.
Check whether a subtask which has been retried
has had the retry already write its results here before the code
that was invoking the retry had a chance to update this status.
This is the norm in "eager" mode (used by tests) where the retry is called
and run to completion before control is returned to the code that
invoked the retry. If the retries eventually end in failure (e.g. due to
a maximum number of retries being attempted), the "eager" code will return
the error for each retry as it is popped off the stack. We want to just ignore
the later updates that are called as the result of the earlier retries.
This should not be an issue in production, where status is updated before
a task is retried, and is then updated afterwards if the retry fails.
"""
entry = InstructorTask.objects.get(pk=entry_id)
subtask_dict = json.loads(entry.subtasks)
subtask_status_info = subtask_dict['status']
current_subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
current_retry_count = current_subtask_status.get_retry_count()
new_retry_count = new_subtask_status.get_retry_count()
if current_retry_count <= new_retry_count:
update_subtask_status(entry_id, current_task_id, new_subtask_status)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
class TestBulkEmailInstructorTask(InstructorTaskCourseTestCase):
"""Tests instructor task that send bulk email."""
def setUp(self):
super(TestBulkEmailInstructorTask, self).setUp()
self.initialize_course()
self.instructor = self.create_instructor('instructor')
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
def _create_input_entry(self, course_id=None, to_option=None):
"""
Creates a InstructorTask entry for testing.
Overrides the base class version in that this creates CourseEmail.
"""
to_option = to_option or SEND_TO_ALL
course_id = course_id or self.course.id
course_email = CourseEmail.create(course_id, self.instructor, to_option, "Test Subject", "<p>This is a test message</p>")
task_input = {'email_id': course_email.id} # pylint: disable=no-member
task_id = str(uuid4())
instructor_task = InstructorTaskFactory.create(
course_id=course_id,
requester=self.instructor,
task_input=json.dumps(task_input),
task_key='dummy value',
task_id=task_id,
)
return instructor_task
def _run_task_with_mock_celery(self, task_class, entry_id, task_id):
"""Submit a task and mock how celery provides a current_task."""
mock_current_task = Mock()
mock_current_task.max_retries = settings.BULK_EMAIL_MAX_RETRIES
mock_current_task.default_retry_delay = settings.BULK_EMAIL_DEFAULT_RETRY_DELAY
task_args = [entry_id, {}]
with patch('bulk_email.tasks._get_current_task') as mock_get_task:
mock_get_task.return_value = mock_current_task
return task_class.apply(task_args, task_id=task_id).get()
def test_email_missing_current_task(self):
task_entry = self._create_input_entry()
with self.assertRaises(ValueError):
send_bulk_course_email(task_entry.id, {})
def test_email_undefined_course(self):
# Check that we fail when passing in a course that doesn't exist.
task_entry = self._create_input_entry(course_id=SlashSeparatedCourseKey("bogus", "course", "id"))
with self.assertRaises(ValueError):
            self._run_task_with_mock_celery(send_bulk_course_email, task_entry.id, task_entry.task_id)
|
def test_bad_task_id_on_update(self):
task_entry = self._create_input_entry()
        def dummy_update_subtask_status(entry_id, _current_task_id, new_subtask_status):
|
"""Passes a bad value for task_id to test update_subtask_status"""
bogus_task_id = "this-is-bogus"
update_subtask_status(entry_id, bogus_task_id, new_subtask_status)
with self.assertRaises(ValueError):
with patch('bulk_email.tasks.update_subtask_status', dummy_update_subtask_status):
send_bulk_course_email(task_entry.id, {}) # pylint: disable=no-member
def _create_students(self, num_students):
"""Create students for testing"""
return [self.create_student('robot%d' % i) for i in xrange(num_students)]
def _assert_single_subtask_status(self, entry, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):
"""Compare counts with 'subtasks' entry in InstructorTask table."""
subtask_info = json.loads(entry.subtasks)
# verify subtask-level counts:
self.assertEquals(subtask_info.get('total'), 1)
self.assertEquals(subtask_info.get('succeeded'), 1 if succeeded > 0 else 0)
self.assertEquals(subtask_info.get('failed'), 0 if succeeded > 0 else 1)
# verify individual subtask status:
subtask_status_info = subtask_info.get('status')
task_id_list = subtask_status_info.keys()
self.assertEquals(len(task_id_list), 1)
task_id = task_id_list[0]
subtask_status = subtask_status_info.get(task_id)
print("Testing subtask status: {}".format(subtask_status))
self.assertEquals(subtask_status.get('task_id'), task_id)
self.assertEquals(subtask_status.get('attempted'), succeeded + failed)
self.assertEquals(subtask_status.get('succeeded'), succeeded)
self.assertEquals(subtask_status.get('skipped'), skipped)
self.assertEquals(subtask_status.get('failed'), failed)
self.assertEquals(subtask_status.get('retried_nomax'), retried_nomax)
self.assertEquals(subtask_status.get('retried_withmax'), retried_withmax)
self.assertEquals(subtask_status.get('state'), SUCCESS if succeeded > 0 else FAILURE)
def _test_run_with_task(self, task_class, action_name, total, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):
"""Run a task and check the number of emails processed."""
task_entry = self._create_input_entry()
parent_status = self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)
# check return value
self.assertEquals(parent_status.get(
|
nabilbendafi/mbed
|
workspace_tools/toolchains/arm.py
|
Python
|
apache-2.0
| 7,354
| 0.005167
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join
from workspace_tools.toolchains import mbedToolchain
from workspace_tools.settings import ARM_BIN, ARM_INC, ARM_LIB, MY_ARM_CLIB, ARM_CPPLIB
from workspace_tools.hooks import hook_tool
from workspace_tools.settings import GOANNA_PATH
class ARM(mbedToolchain):
LINKER_EXT = '.sct'
LIBRARY_EXT = '.ar'
STD_LIB_NAME = "%s.ar"
DIAGNOSTIC_PATTERN = re.compile('"(?P<file>[^"]+)", line (?P<line>\d+)( \(column (?P<column>\d+)\)|): (?P<severity>Warning|Error): (?P<message>.+)')
DEP_PATTERN = re.compile('\S+:\s(?P<file>.+)\n')
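    # Editor's note: the two strings below are illustrative additions for this write-up
    # (not in the original file), showing the shapes the regexes above are written to
    # match; the warning number and file names are invented examples.
    SAMPLE_DIAGNOSTIC = '"main.c", line 12: Warning: #550-D: variable "x" was set but never used'
    SAMPLE_DEPENDENCY = 'main.o: main.c\n'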
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
mbedToolchain.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
if target.core == "Cortex-M0+":
cpu = "Cortex-M0"
elif target.core == "Cortex-M4F":
cpu = "Cortex-M4.fp"
elif target.core == "Cortex-M7F":
cpu = "Cortex-M7.fp.sp"
else:
cpu = target.core
main_cc = join(ARM_BIN, "armcc")
common = ["-c",
"--cpu=%s" % cpu, "--gnu",
"-Otime", "--split_sections", "--apcs=interwork",
"--brief_diagnostics", "--restrict", "--multibyte_chars"
]
if "save-asm" in self.options:
common.extend(["--asm", "--interleave"])
if "debug-info" in self.options:
            common.append("-g")
|
common.append("-O0")
else:
common.append("-O3")
common_c = [
"--md", "--no_depend_system_headers",
'-I%s' % ARM_INC
]
self.asm = [main_cc] + common + ['-I%s' % ARM_INC]
if not "analyze" in self.options:
self.cc = [main_cc] + common + common_c + ["--c99"]
self.cppc = [main_cc] + common + common_c + ["--cpp", "--no_rtti"]
|
else:
self.cc = [join(GOANNA_PATH, "goannacc"), "--with-cc=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--c99"]
self.cppc= [join(GOANNA_PATH, "goannac++"), "--with-cxx=" + main_cc.replace('\\', '/'), "--dialect=armcc", '--output-format="%s"' % self.GOANNA_FORMAT] + common + common_c + ["--cpp", "--no_rtti"]
self.ld = [join(ARM_BIN, "armlink")]
self.sys_libs = []
self.ar = join(ARM_BIN, "armar")
self.elf2bin = join(ARM_BIN, "fromelf")
def remove_option(self, option):
for tool in [self.asm, self.cc, self.cppc]:
if option in tool:
tool.remove(option)
def assemble(self, source, object, includes):
# Preprocess first, then assemble
tempfile = object + '.E.s'
return [
self.asm + ['-D%s' % s for s in self.get_symbols() + self.macros] + ["-I%s" % i for i in includes] + ["-E", "-o", tempfile, source],
self.hook.get_cmdline_assembler(self.asm + ["-o", object, tempfile])
]
def parse_dependencies(self, dep_path):
dependencies = []
for line in open(dep_path).readlines():
match = ARM.DEP_PATTERN.match(line)
if match is not None:
dependencies.append(match.group('file'))
return dependencies
def parse_output(self, output):
for line in output.splitlines():
match = ARM.DIAGNOSTIC_PATTERN.match(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message'),
target_name=self.target.name,
toolchain_name=self.name
)
match = self.goanna_parse_line(line)
if match is not None:
self.cc_info(
match.group('severity').lower(),
match.group('file'),
match.group('line'),
match.group('message')
)
def get_dep_opt(self, dep_path):
return ["--depend", dep_path]
def archive(self, objects, lib_path):
self.default_cmd([self.ar, '-r', lib_path] + objects)
def link(self, output, objects, libraries, lib_dirs, mem_map):
if len(lib_dirs):
args = ["-o", output, "--userlibpath", ",".join(lib_dirs), "--info=totals", "--list=.link_totals.txt"]
else:
args = ["-o", output, "--info=totals", "--list=.link_totals.txt"]
if mem_map:
args.extend(["--scatter", mem_map])
if hasattr(self.target, "link_cmdline_hook"):
args = self.target.link_cmdline_hook(self.__class__.__name__, args)
self.default_cmd(self.ld + args + objects + libraries + self.sys_libs)
@hook_tool
def binary(self, resources, elf, bin):
args = [self.elf2bin, '--bin', '-o', bin, elf]
if hasattr(self.target, "binary_cmdline_hook"):
args = self.target.binary_cmdline_hook(self.__class__.__name__, args)
self.default_cmd(args)
class ARM_STD(ARM):
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
self.cc += ["-D__ASSERT_MSG"]
self.cppc += ["-D__ASSERT_MSG"]
self.ld.append("--libpath=%s" % ARM_LIB)
class ARM_MICRO(ARM):
PATCHED_LIBRARY = False
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
ARM.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
# Compiler
self.asm += ["-D__MICROLIB"]
self.cc += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]
self.cppc += ["--library_type=microlib", "-D__MICROLIB", "-D__ASSERT_MSG"]
# Linker
self.ld.append("--library_type=microlib")
# We had to patch microlib to add C++ support
# In later releases this patch should have entered mainline
if ARM_MICRO.PATCHED_LIBRARY:
self.ld.append("--noscanlib")
# System Libraries
self.sys_libs.extend([join(MY_ARM_CLIB, lib+".l") for lib in ["mc_p", "mf_p", "m_ps"]])
if target.core == "Cortex-M3":
self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ws", "cpprt_w"]])
elif target.core in ["Cortex-M0", "Cortex-M0+"]:
self.sys_libs.extend([join(ARM_CPPLIB, lib+".l") for lib in ["cpp_ps", "cpprt_p"]])
else:
self.ld.append("--libpath=%s" % ARM_LIB)
|
ChopChopKodi/pelisalacarta
|
python/main-classic/lib/btserver/dispatcher.py
|
Python
|
gpl-3.0
| 805
| 0.006211
|
from monitor import Monitor
try:
from python_libtorrent import get_libtorrent
lt = get_libtorrent()
except Exception, e:
import libtorrent as lt
class Dispatcher(Monitor):
def __init__(self, client):
super(Dispatcher,self).__init__(client)
def do_start(self, th, ses):
|
self._th = th
self._ses=ses
self.start()
def run(self):
if not self._ses:
raise Exception('Invalid state, session is not initialized')
while self.running:
a=self._ses.wait_for_alert(1000)
if a:
alerts= self._ses.pop_alerts()
for alert in alerts:
with self.lock:
for cb in self.listeners:
|
cb(lt.alert.what(alert), alert)
|
chipaca/snapcraft
|
snapcraft/plugins/v1/waf.py
|
Python
|
gpl-3.0
| 3,034
| 0
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016, 2018, 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The WAF plugin is useful to build waf based parts
waf based projects are projects that drive configuration and build via
a local waf python helper - see https://github.com/waf-project/waf for more
details.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
In addition, this plugin uses the following plugin-specific keywords:
- configflags:
(list of strings)
configure flags to pass to the build such as those shown by running
./waf --help
"""
from snapcraft.plugins.v1 import PluginV1
class WafPlugin(PluginV1):
"""plugin to build via waf build system"""
@classmethod
def schema(cls):
schema = super().schema()
schema["properties"]["configflags"] = {
"type": "array",
"minitems": 1,
"uniqueItems": True,
"items": {"type": "string"},
"default
|
": [],
}
schema["required"] = ["source"]
return schema
|
def __init__(self, name, options, project):
super().__init__(name, options, project)
self._setup_base_tools()
def _setup_base_tools(self):
self.build_packages.append("python-dev:native")
@classmethod
def get_build_properties(cls):
# Inform Snapcraft of the properties associated with building. If these
# change in the YAML Snapcraft will consider the build step dirty.
return ["configflags"]
def env(self, root):
env = super().env(root)
if self.project.is_cross_compiling:
env.extend(
[
"CC={}-gcc".format(self.project.arch_triplet),
"CXX={}-g++".format(self.project.arch_triplet),
]
)
return env
def enable_cross_compilation(self):
# Let snapcraft know that this plugin can cross-compile
# If the method isn't implemented an exception is raised
pass
def build(self):
super().build()
self.run(["./waf", "distclean"])
self.run(["./waf", "configure"] + self.options.configflags)
self.run(["./waf", "build"])
self.run(
["./waf", "install", "--destdir=" + self.installdir]
) # target from snappy env
|
cfe-lab/MiCall
|
micall/tests/test_remap.py
|
Python
|
agpl-3.0
| 54,983
| 0.000527
|
from csv import DictWriter
from io import StringIO
import os
import unittest
from pathlib import Path
from unittest.mock import patch, Mock, DEFAULT
from pytest import fixture
from micall.core import remap
from micall.core.project_config import ProjectConfig
from micall.core.remap import is_first_read, is_short_read, \
MixedReferenceSplitter, write_remap_counts, convert_prelim, read_contigs
from micall.utils.externals import Bowtie2, Bowtie2Build
HXB2_NAME = "HIV1-B-FR-K03455-seed"
@fixture(name='projects', scope="session")
def load_projects():
yield ProjectConfig.loadDefault()
class IsShortReadTest(unittest.TestCase):
def assertCigarIsPrimer(self, cigar, is_primer_expected):
row = {'cigar': cigar}
max_primer_length = 29
self.assertEqual(is_primer_expected, is_short_read(row, max_primer_length))
def testIsPrimerForLongRead(self):
self.assertCigarIsPrimer('45M', False)
def testIsPrimerForShortRead(self):
self.assertCigarIsPrimer('10M', True)
def testIsPrimerForShortReadWithClipping(self):
self.assertCigarIsPrimer('45S10M', True)
def testIsPrimerForReadWithMultipleMatches(self):
self.assertCigarIsPrimer('10M3D45M', False)
class IsFirstReadTest(unittest.TestCase):
def testFirstRead(self):
flag = '99'
is_first_expected = True
is_first = is_first_read(flag)
self.assertEqual(is_first_expected, is_first)
def testSecondRead(self):
flag = '147'
is_first_expected = False
is_first = is_first_read(flag)
self.assertEqual(is_first_expected, is_first)
def testSmallFlag(self):
flag = '3'
is_first_expected = False
is_first = is_first_read(flag)
self.assertEqual(is_first_expected, is_first)
class SamToConseqsTest(unittest.TestCase):
def testSimple(self):
# SAM:qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t12M\t=\t1\t12\tACAAGACCCAAC\tJJJJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAAGACCCAAC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testOffset(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t147\ttest\t4\t44\t12M\t=\t3\t-12\tACAAGACCCAAC\tJJJJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'NNNACAAGACCCAAC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testHeaders(self):
sam_file = StringIO(
"@SH\tsome header\n"
"@MHI\tmost headers are ignored, except SQ for sequence reference\n"
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testUnknownReferenceName(self):
sam_file = StringIO(
"@SQ\tSN:testX\n"
"test1\t99\ttestY\t1\t44\t12M\t=\t1\t3\tACA\tJJJ\n"
)
expected_conseqs = {}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testHeaderFields(self):
sam_file = StringIO(
"@SQ\tOF:other field: ignored\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testExtraFields(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\tAS:i:236\tNM:i:12\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testMaxConsensus(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tACA\tJJJ\n"
"test2\t147\ttest\t1\t44\t3M\t=\t1\t-3\tACA\tJJJ\n"
"test3\t99\ttest\t1\t44\t3M\t=\t1\t3\tTCA\tJJJ\n"
)
expected_conseqs = {'test': 'ACA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testTie(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"
|
test1\t99\ttest\t1\t44\t3M\t=\t1\t3\tGCA\tJJJ\n"
"tes
|
t2\t147\ttest\t1\t44\t3M\t=\t1\t-3\tTCA\tJJJ\n"
)
expected_conseqs = {'test': 'GCA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testSoftClip(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3S5M1S\t=\t1\t9\tACAGGGAGA\tJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'GGGAG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testSimpleInsertion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAGGGAGA'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testLowQualityInsertion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJJJ/JJJJ\n"
)
expected_conseqs = {'test': 'ACAAGA'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testInsertionAfterLowQuality(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJ/JJJJJJ\n"
)
expected_conseqs = {'test': 'ACNAGA'}
conseqs = remap.sam_to_conseqs(sam_file, quality_cutoff=32)
self.assertDictEqual(expected_conseqs, conseqs)
def testInsertionAndOffset(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3I3M\t=\t1\t9\tACAGGGAGA\tJJJJJJJJJJJJ\n"
"test2\t99\ttest\t5\t44\t5M\t=\t1\t5\tGACCC\tJJJJJ\n"
)
expected_conseqs = {'test': 'ACAGGGAGACCC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testComplexInsertion(self):
# Insertions are ignored if not a multiple of three
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M1I3M2I6M\t=\t1\t12\tACAGAGAGGCCCAAC\tJJJJJJJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAAGACCCAAC'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testDeletion(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
)
expected_conseqs = {'test': 'ACAGGG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testDeletionInSomeReads(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M3D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
"test2\t99\ttest\t1\t44\t3M3D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
"test3\t99\ttest\t1\t44\t9M\t=\t3\t9\tACATTTGGG\tJJJJJJJJJ\n"
)
expected_conseqs = {'test': 'ACATTTGGG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testDeletionWithFrameShift(self):
sam_file = StringIO(
"@SQ\tSN:test\n"
"test1\t99\ttest\t1\t44\t3M1D3M\t=\t3\t6\tACAGGG\tJJJJJJ\n"
)
expected_conseqs = {'test': 'ACA-GGG'}
conseqs = remap.sam_to_conseqs(sam_file)
self.assertDictEqual(expected_conseqs, conseqs)
def testBigDeletionWithFr
|
floriangeigl/arxiv_converter
|
tex_utils.py
|
Python
|
gpl-3.0
| 969
| 0.002064
|
import re
simple_cmd_match = re.compile(r'\\([^\\]+?)\{(.*?)\}')
graphics_cmd_match = re.compile(r'\\includegraphics\[.*?\]?\{(.*?)\}')
begin_cmd_match = re.compile(r'\\begin{([^}]+?)}(?:(?:\[([^\]]+?)\])|.*)')
newcmd_match = re.compile(r'\\.+?\{(.*?)\}\{(.*)\}')
# newcmd_match_with_var = re.compile(r'\\[^\\]+?\{(.*?)\}\{(.*?)\}')
vars_match = re.compile(r'\{(.+?)\}')
def get_vars(line):
res = list()
open_braces = 0
one_var = ''
for char in line.strip():
        if char == '}':
|
            open_braces -= 1
        if open_braces > 0:
|
            one_var += char
        elif open_braces == 0 and one_var:
            res.append(one_var)
            one_var = ''
        if char == '{':
            open_braces += 1
return res
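# Editor's note: the helper below is an illustrative check added for this write-up, not
# part of the original module; the sample \newcommand line is an arbitrary example.
def _get_vars_example():
    # Top-level brace groups become separate items; nested braces stay inside one item.
    sample = r'\newcommand{\foo}{a {nested} value}'
    assert get_vars(sample) == [r'\foo', 'a {nested} value']
    return get_vars(sample)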
class FileIter:
def __init__(self, filename):
self.fn = filename
self.f = open(self.fn, 'r')
def get_line(self):
for line in self.f:
yield line
self.f.close()
|
CSSCorp/openstack-automation
|
file_root/_modules/neutron.py
|
Python
|
gpl-2.0
| 13,539
| 0
|
# -*- coding: utf-8 -*-
'''
Module for handling openstack neutron calls.
:maintainer: <akilesh1597@gmail.com>
:maturity: new
:platform: all
:optdepends: - neutronclient Python adapter
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file::
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.insecure: False #(optional)
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
If configuration for multiple openstack accounts is required, they can be
set up as different configuration profiles:
For example::
openstack1:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
openstack2:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
With this configuration in place, any of the neutron functions can make
use of a configuration profile by declaring it explicitly.
For example::
salt '*' neutron.list_subnets profile=openstack1
Please check 'https://wiki.openstack.org/wiki/Neutron/APIv2-specification'
for the correct arguments to the api
'''
import logging
from functools import wraps
LOG = logging.getLogger(__name__)
# Import third party libs
HAS_NEUTRON = False
try:
from neutronclient.v2_0 import client
HAS_NEUTRON = True
except ImportError:
pass
__opts__ = {}
def __virtual__():
'''
Only load this module if neutron
is installed on this minion.
'''
if HAS_NEUTRON:
return 'neutron'
return False
def _autheticate(func_name):
'''
Authenticate requests with the salt keystone module and format return data
'''
@wraps(func_name)
def decorator_method(*args, **kwargs):
'''
Authenticate request and format return data
'''
connection_args = {'profile': kwargs.get('profile', None)}
nkwargs = {}
for kwarg in kwargs:
if 'connection_' in kwarg:
connection_args.update({kwarg: kwargs[kwarg]})
elif '__' not in kwarg:
nkwargs.update({kwarg: kwargs[kwarg]})
kstone = __salt__['keystone.auth'](**connection_args)
token = kstone.auth_token
endpoint = kstone.service_catalog.url_for(
service_type='network',
endpoint_type='publicURL')
neutron_interface = client.Client(
endpoint_url=endpoint, token=token)
LOG.error('calling with args ' + str(args))
LOG.error('calling with kwargs ' + str(nkwargs))
return_data = func_name(neutron_interface, *args, **nkwargs)
LOG.error('got return data ' + str(return_data))
if isinstance(return_data, list):
# format list as a dict for rendering
return {data.get('name', None) or data['id']: data
for data in return_data}
return return_data
return decorator_method
@_autheticate
def list_floatingips(neutron_interface, **kwargs):
'''
list all floatingips
CLI Example:
.. code-block:: bash
salt '*' neutron.list_floatingips
'''
return neutron_interface.list_floatingips(**kwargs)['floatingips']
@_autheticate
def list_security_groups(neutron_interface, **kwargs):
'''
list all security_groups
CLI Example:
.. code-block:: bash
salt '*' neutron.list_security_groups
'''
return neutron_interface.list_security_groups(**kwargs)['security_groups']
@_autheticate
def list_subnets(neutron_interface, **kwargs):
'''
list all subnets
CLI Example:
.. code-block:: bash
salt '*' neutron.list_subnets
'''
return neutron_interface.list_subnets(**kwargs)['subnets']
@_autheticate
def list_networks(neutron_interface, **kwargs):
'''
list all networks
CLI Example:
.. code-block:: bash
salt '*' neutron.list_networks
'''
return neutron_interface.list_networks(**kwargs)['networks']
@_autheticate
def list_ports(neutron_interface, **kwargs):
'''
list all ports
CLI Example:
.. code-block:: bash
salt '*' neutron.list_ports
'''
return neutron_interface.list_ports(**kwargs)['ports']
@_autheticate
def list_routers(neutron_interface, **kwargs):
'''
list all routers
CLI Example:
.. code-block:: bash
salt '*' neutron.list_routers
'''
return neutron_interface.list_routers(**kwargs)['routers']
@_autheticate
def update_floatingip(neutron_interface, fip, port_id=None):
'''
update floating IP. Should be used to associate and disassociate
floating IP with instance
CLI Example:
.. code-block:: bash
to associate with an instance's port
salt '*' neutron.update_floatingip openstack-floatingip-id port-id
to disassociate from an instance's port
salt '*' neutron.update_floatingip openstack-floatingip-id
'''
neutron_interface.update_floatingip(fip, {"floatingip":
{"port_id": port_id}})
@_autheticate
def update_subnet(neutron_interface, subnet_id, **subnet_params):
'''
update given subnet
CLI Example:
.. code-block:: bash
        salt '*' neutron.update_subnet openstack-subnet-id name='new_name'
|
'''
neutron_interface.update_subnet(subnet_id, {'subnet': subnet_params})
@_autheticate
def update_router(neutron_interface, router_id, **router_params):
'''
update given router
CLI Example:
.. code-block:: bash
        salt '*' neutron.update_router openstack-router-id name='new_name'
|
external_gateway='openstack-network-id' administrative_state=true
'''
neutron_interface.update_router(router_id, {'router': router_params})
@_autheticate
def router_gateway_set(neutron_interface, router_id, external_gateway):
'''
Set external gateway for a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id openstack-network-id
'''
neutron_interface.update_router(
router_id, {'router': {'external_gateway_info':
{'network_id': external_gateway}}})
@_autheticate
def router_gateway_clear(neutron_interface, router_id):
'''
Clear external gateway for a router
CLI Example:
.. code-block:: bash
salt '*' neutron.update_router openstack-router-id
'''
neutron_interface.update_router(
router_id, {'router': {'external_gateway_info': None}})
@_autheticate
def create_router(neutron_interface, **router_params):
'''
Create OpenStack Neutron router
CLI Example:
.. code-block:: bash
salt '*' neutron.create_router name=R1
'''
response = neutron_interface.create_router({'router': router_params})
if 'router' in response and 'id' in response['router']:
return response['router']['id']
@_autheticate
def router_add_interface(neutron_interface, router_id, subnet_id):
'''
Attach router to a subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.router_add_interface openstack-router-id subnet-id
'''
neutron_interface.add_interface_router(router_id, {'subnet_id': subnet_id})
@_autheticate
def router_rem_interface(neutron_interface, router_id, subnet_id):
'''
    Detach router from a subnet
CLI Example:
.. code-block:: bash
salt '*' neutron.router_rem_interface openstack-router-id subnet-id
'''
neutron_interface.remove_interface_router(
router_id, {'subnet_id': subnet_id})
@_autheticate
def create_security_group(neutron_interface, **sg_params):
'''
Create a new security group
CLI Example:
.. code-block:: ba
|
jairtrejo/doko
|
app/rohan/urls.py
|
Python
|
mit
| 581
| 0.001721
|
from django.conf import settings
from django.conf.urls import patterns, include, url
|
from django.contrib import admin
from django.conf.urls.static import static
from .views import HomeView
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
urlpatterns = (
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
patterns(
'',
        url('^$', HomeView.as_view(), name='home'),
|
url(r'^admin/', include(admin.site.urls)),
url(r'^social/', include('socialregistration.urls', namespace='socialregistration')),
)
)
|
BertrandBordage/django-cachalot
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,766
| 0.006046
|
# -*- coding: utf-8 -*-
#
# django-cachalot documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 28 22:46:50 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
import cachalot
# This sets up Django, necessary for autodoc
import runtests
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-cachalot'
copyright = '2014-2016, Bertrand Bordage'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '%s.%s' % cachalot.VERSION[:2]
# The full version, including alpha/beta/rc tags.
release = cachalot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-cachalotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize
|
': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-cachalot.tex', u'django-cachalot Documentation',
u'Bertrand Bordage'
|
, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-cachalot', u'django-cachalot Documentation',
[u'Bertrand Bordage'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-cachalot', u'django-cachalot Documentation',
|
dogsaur/SMS
|
db_repository/versions/007_migration.py
|
Python
|
mit
| 1,457
| 0.001373
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
product = Table('product', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('product_name', VARCHAR),
Column('bar_code', INTEGER),
Column('price', NUMERIC),
Column('picture_id', INTEGER),
Column('category', VARCHAR),
Column('inprice', NUMERIC),
Column('size', VARCHAR),
Column('supply', INTEGER),
)
product = Table('product', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('product_name', String),
Column(
|
'category', String),
Column('bar_code', Integer),
Column('size', String),
Column('inprice', Numeric),
Column('price', Numeric),
Column('supply_id', Integer),
Column('picture_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don'
|
t create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['product'].columns['supply'].drop()
post_meta.tables['product'].columns['supply_id'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['product'].columns['supply'].create()
post_meta.tables['product'].columns['supply_id'].drop()
|
JoseKilo/week_parser
|
tests/unit/test_week_parser.py
|
Python
|
mit
| 5,936
| 0
|
import pytest
import six
from mock import call, patch
from tests import utils
from week_parser.base import parse_row, parse_week, populate_extra_data
from week_parser.main import PrettyPrinter
def test_populate_extra_data_no_days():
"""
If we haven't found any days data, there is no extra data to add
"""
week_data = {}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {}
def test_populate_extra_data_square_day():
"""
If we have found a 'square' day, the description and square value are added
"""
value = 7
week_data = {'mon': {'value': value}}
description = '__DESCRIPTION__'
populate_extra_data(week_data, description)
assert week_data == {
'mon': {
'value': value,
'square': value ** 2,
'description': '{} {}'.format(description, value ** 2)
}
}
def test_populate_extra_data_double_day():
"""
If we have found a 'double' day, the description and double value are added
"""
value = 7
week_data = {'thu': {'value': value}}
description = '__DESCRI
|
PTION__'
populate_extra_data(week_data, description)
assert week_data == {
'thu': {
'value': value,
'double': value * 2,
'description': '{} {}'.format(description, value * 2)
}
}
def test_parse_row_single_day():
"""
If the input row contains a single day, it is outputted
"""
row = {'
|
mon': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {'mon': {'day': 'mon', 'value': 3}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_day_range():
"""
If the input row contains a day range, it is outputted
"""
row = {'mon-wed': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 3},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_extra_columns():
"""
If the input row contains any extra columns, they are skipped
"""
row = {'wed': '2', 'description': '__DESCRIPTION__',
'__FOO__': '__BAR__', '__ANYTHING__': '__ELSE__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {'wed': {'day': 'wed', 'value': 2}}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row_not_int_value():
"""
If the day value is not an integer, we get a ValueError
"""
row = {'mon': '__NOT_A_NUMBER__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
with pytest.raises(ValueError) as exc:
parse_row(row)
assert mock_populate.call_count == 0
assert str(exc.value) == (
"invalid literal for int() with base 10: '__NOT_A_NUMBER__'")
def test_parse_row_invalid_day_range():
"""
If the input row contains an invalid day range, we skip it
"""
row = {'foo-bar': '3', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_row():
"""
An input row may contain any combination of day ranges
"""
row = {'mon-tue': '3', 'wed-thu': '2', 'fri': '1',
'__SOME__': '__DATA__', 'description': '__DESCRIPTION__'}
with patch('week_parser.base.populate_extra_data') as mock_populate:
week_data = parse_row(row)
assert week_data == {
'mon': {'day': 'mon', 'value': 3},
'tue': {'day': 'tue', 'value': 3},
'wed': {'day': 'wed', 'value': 2},
'thu': {'day': 'thu', 'value': 2},
'fri': {'day': 'fri', 'value': 1},
}
assert mock_populate.call_args_list == [call(week_data, '__DESCRIPTION__')]
def test_parse_week_empty_file():
"""
We can process an empty file
"""
filename = 'anything.csv'
with utils.mock_open(file_content='') as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_week:
result = parse_week(filename)
assert result == []
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_week.call_count == 0
def test_parse_week_valid_file():
"""
We can process a file with valid content
"""
filename = 'anything.csv'
csv_data = ('mon,tue,some_column1,wed,thu,fri,description\n'
'1,5,data,2,3,3,first_desc\n')
expected_row = {'mon': '1', 'tue': '5', 'wed': '2', 'thu': '3', 'fri': '3',
'description': 'first_desc', 'some_column1': 'data'}
with utils.mock_open(file_content=csv_data) as mock_open:
with patch('week_parser.base.parse_row') as mock_parse_row:
mock_parse_row.return_value = {'mon': {'day': 'mon'}}
result = parse_week(filename)
assert result == [{'day': 'mon'}]
assert mock_open.call_args_list == [call(filename)]
assert mock_parse_row.call_args_list == [call(expected_row)]
def test_pprint_bytes(capsys):
printer = PrettyPrinter()
printer.pprint(six.b('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
def test_pprint_unicode(capsys):
printer = PrettyPrinter()
printer.pprint(six.u('__FOO__'))
out, err = capsys.readouterr()
assert err == ''
assert out == "'__FOO__'\n"
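# --- Illustrative sketch (not part of this test module) ---
# A hypothetical parse_row implementation that would satisfy the behaviour
# exercised above (single days, 'mon-wed' style ranges, skipping unknown
# columns, int conversion, then delegating to populate_extra_data). The real
# week_parser.base.parse_row may differ; this only mirrors the tests.
DAYS = ['mon', 'tue', 'wed', 'thu', 'fri']
def _sketch_parse_row(row):
    week_data = {}
    for key, raw in row.items():
        if key in DAYS:
            days = [key]
        elif '-' in key:
            parts = key.split('-')
            if len(parts) == 2 and parts[0] in DAYS and parts[1] in DAYS:
                days = DAYS[DAYS.index(parts[0]):DAYS.index(parts[1]) + 1]
            else:
                continue  # invalid ranges such as 'foo-bar' are skipped
        else:
            continue  # description and extra columns are ignored here
        for day in days:
            week_data[day] = {'day': day, 'value': int(raw)}
    populate_extra_data(week_data, row.get('description'))
    return week_data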
|
anntzer/numpy
|
numpy/core/memmap.py
|
Python
|
bsd-3-clause
| 11,688
| 0.000684
|
from contextlib import nullcontext
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import os_fspath, is_pathlib_path
from numpy.core.overrides import set_module
__all__ = ['memmap']
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
mode_equivalents = {
"readonly":"r",
"copyonwrite":"c",
"readwrite":"r+",
"write":"w+"
}
@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmaps are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Flush the memmap instance to write the changes to the file. Currently there
is no API to close the underlying ``mmap``. It is tricky to ensure the
resource is actually closed, since it may be shared between different
memmap instances.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode
|
: {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing fi
|
le for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
file are valid; The file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : tuple, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'.
Attributes
----------
filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on disk.
When you delete a memmap object, flush is called first to write
changes to disk.
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
Flushes memory changes to disk in order to read them back
>>> fp.flush()
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError as e:
if mode not in valid_filemodes:
raise ValueError(
"mode must be one of {!r} (got {!r})"
.format(valid_filemodes + list(mode_equivalents.keys()), mode)
) from None
if mode == 'w+' and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, 'read'):
f_ctx = nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
|
nullable/libxmlquery
|
documentation/conf.py
|
Python
|
mit
| 8,376
| 0.00693
|
# -*- coding: utf-8 -*-
#
# libxmlquery documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 5 15:13:45 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libxmlquery'
copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false val
|
ue, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
|
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libxmlquerydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libxmlquery.tex', u'libxmlquery Documentation',
u'Frederico Gonçalves, Vasco Fernandes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libxmlquery', u'libxmlquery Documentation',
[u'Frederico Gonçalves, Vasco Fernandes'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'libxmlquery'
epub_author = u'Frederico Gonçalves, Vasco Fernandes'
epub_publisher = u'Frederico Gonçalves, Vasco Fernandes'
epub_copyright = u'2010, Frederico Gonçalves, Vasco Fernandes'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files tha
|
paulrouget/servo
|
tests/wpt/web-platform-tests/xhr/resources/access-control-basic-whitelist-response-headers.py
|
Python
|
mpl-2.0
| 571
| 0
|
def main(request, response):
headers = {
# CORS-safelisted
"content-type": "text/plain",
"cache-control": "n
|
o cache",
"content-language": "en",
"expires": "Fri, 30 Oct 1998 14:19:41 GMT",
"last-modified": "Tue, 15 Nov 1994 12:45:26 GMT",
"pragma": "no-cache",
# Non-CORS-safelisted
"x-test": "foobar",
"Access-Control-Allow-Origin": "*"
}
for header in headers:
response.he
|
aders.set(header, headers[header])
response.content = "PASS: Cross-domain access allowed."
|
endlessm/chromium-browser
|
third_party/catapult/devil/devil/android/cpu_temperature_test.py
|
Python
|
bsd-3-clause
| 4,988
| 0.005413
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of cpu_temperature.py
"""
# pylint: disable=unused-argument
import logging
import unittest
from devil import devil_env
from devil.android import cpu_temperature
from devil.android import device_utils
from devil.utils import mock_calls
from devil.android.sdk import adb_wrapper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class CpuTemperatureTest(mock_calls.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def setUp(self):
# Mock the device
self.mock_device = mock.Mock(spec=device_utils.DeviceUtils)
self.mock_device.build_product = 'blueline'
self.mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
self.mock_device.FileExists.return_value = True
self.cpu_temp = cpu_temperature.CpuTemperature(self.mock_device)
self.cpu_temp.InitThermalDeviceInformation()
class CpuTemperatureInitTest(unittest.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testInitWithDeviceUtil(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
c = cpu_temperature.CpuTemperature(d)
self.assertEqual(d, c.GetDeviceForTesting())
def testInitWithMissing_fails(self):
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature(None)
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature('')
class CpuTemperatureGetThermalDeviceInformationTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testGetThermalDeviceInformation_noneWhenIncorrectLabel(self):
invalid_device = mock.Mock(spec=device_utils.DeviceUtils)
invalid_device.build_product = 'invalid_name'
c = cpu_temperature.CpuTemperature(invalid_device)
c.InitThermalDeviceInformation()
self.assertEqual(c.GetDeviceInfoForTesting(), None)
def testGetThermalDeviceInformation_getsCorrectInformation(self):
correct_information = {
'cpu0': '/sys/class/thermal/thermal_zone11/temp',
'cpu1': '/sys/class/thermal/thermal_zone12/temp',
'cpu2': '/sys/class/thermal/thermal_zone13/temp',
'cpu3': '/sys/class/thermal/thermal_zone14/temp',
'cpu4': '/sys/class/thermal/thermal_zone15/temp',
'cpu5': '/sys/class/thermal/thermal_zone16/temp',
'cpu6': '/sys/class/thermal/thermal_zone17/temp',
'cpu7': '/sys/class/thermal/thermal_zone18/temp'
}
self.assertEqual(
cmp(correct_information,
self.cpu_temp.GetDeviceInfoForTesting().get('cpu_temps')), 0)
class CpuTemperatureIsSupportedTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsTrue(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = True
c = cpu_temperature.CpuTemperature(d)
self.assertTrue(c.IsSupported())
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsFalse(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = False
c = cpu_temperature.CpuTemperature(d)
self.assertFa
|
lse(c.IsSupported())
class CpuTemperatureLetCpuCoolToTemperatureTest(CpuTemperatureTest):
#
|
Return values for the mock side effect
cooling_down0 = (
[45000
for _ in range(8)] + [43000
for _ in range(8)] + [41000 for _ in range(8)])
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_coolWithin24Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down0)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEquals(self.mock_device.ReadFile.call_count, 24)
cooling_down1 = [45000 for _ in range(8)] + [41000 for _ in range(16)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_coolWithin16Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down1)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEquals(self.mock_device.ReadFile.call_count, 16)
constant_temp = [45000 for _ in range(40)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_timeoutAfterThree(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.constant_temp)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEquals(self.mock_device.ReadFile.call_count, 24)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
dtysky/Gal2Renpy
|
Gal2Renpy/SpSyntax/ScSp.py
|
Python
|
mit
| 352
| 0.073864
|
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
######
|
###########################
im
|
port G2R
class ScSp(G2R.SpSyntax):
def Show(self,Flag,Attrs,US,UT,Tmp,FS):
sw=''
name,Attrs=self.Check(Flag,Attrs,UT,FS)
if Attrs['k']=='Main':
sw+=' $ store.chapter='
sw+="'Chapter."+Attrs['cp']+Attrs['sc']+"'\n"
return sw
|
ULHPC/easybuild-framework
|
easybuild/toolchains/compiler/__init__.py
|
Python
|
gpl-2.0
| 1,248
| 0.001603
|
##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains.compiler namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
impo
|
rt pk
|
g_resources
pkg_resources.declare_namespace(__name__)
|
srkukarni/heron
|
integration_test/src/python/integration_test/core/integration_test_spout.py
|
Python
|
apache-2.0
| 4,617
| 0.007581
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base spout for integration tests"""
import copy
from heron.common.src.python.utils.log import Log
from heron.api.src.python.spout.spout import Spout
from heron.api.src.python.stream import Stream
from heron.api.src.python.component.component_spec import HeronComponentSpec
import heron.common.src.python.pex_loader as pex_loader
from ..core import constants as integ_const
class IntegrationTestSpout(Spout):
"""Base spout for integration test
Every spout of an integration test topology consists of this instance, each delegating to a user's spout.
"""
outputs = [Stream(fields=[integ_const.INTEGRATION_TEST_TERMINAL],
name=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)]
@classmethod
def spec(cls, name, par, config, user_spout_classpath, user_output_fields=None):
python_class_path = "%s.%s" % (cls.__module__, cls.__name__)
config[integ_const.USER_SPOUT_CLASSPATH] = user_spout_classpath
|
# avoid modification to cls.outputs
_outputs = copy.copy(cls.outputs)
if user_outpu
|
t_fields is not None:
_outputs.extend(user_output_fields)
return HeronComponentSpec(name, python_class_path, is_spout=True, par=par,
inputs=None, outputs=_outputs, config=config)
def initialize(self, config, context):
user_spout_classpath = config.get(integ_const.USER_SPOUT_CLASSPATH, None)
if user_spout_classpath is None:
raise RuntimeError("User defined integration test spout was not found")
user_spout_cls = self._load_user_spout(context.get_topology_pex_path(), user_spout_classpath)
self.user_spout = user_spout_cls(delegate=self)
self.max_executions = config.get(integ_const.USER_MAX_EXECUTIONS, integ_const.MAX_EXECUTIONS)
assert isinstance(self.max_executions, int) and self.max_executions > 0
Log.info("Max executions: %d" % self.max_executions)
self.tuples_to_complete = 0
self.user_spout.initialize(config, context)
@staticmethod
def _load_user_spout(pex_file, classpath):
pex_loader.load_pex(pex_file)
cls = pex_loader.import_and_get_class(pex_file, classpath)
return cls
@property
def is_done(self):
return self.max_executions == 0
def next_tuple(self):
if self.is_done:
return
self.max_executions -= 1
Log.info("max executions: %d" % self.max_executions)
self.user_spout.next_tuple()
if self.is_done:
self._emit_terminal_if_needed()
Log.info("This topology is finished.")
def ack(self, tup_id):
Log.info("Received an ack with tuple id: %s" % str(tup_id))
self.tuples_to_complete -= 1
if tup_id != integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID:
self.user_spout.ack(tup_id)
self._emit_terminal_if_needed()
def fail(self, tup_id):
Log.info("Received a fail message with tuple id: %s" % str(tup_id))
self.tuples_to_complete -= 1
if tup_id != integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID:
self.user_spout.fail(tup_id)
self._emit_terminal_if_needed()
def emit(self, tup, tup_id=None, stream=Stream.DEFAULT_STREAM_ID,
direct_task=None, need_task_ids=None):
"""Emits from this integration test spout
Overridden method which will be called when the user's spout calls emit()
"""
# if is_control True -> control stream should not count
self.tuples_to_complete += 1
if tup_id is None:
Log.info("Add tup_id for tuple: %s" % str(tup))
_tup_id = integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID
else:
_tup_id = tup_id
super(IntegrationTestSpout, self).emit(tup, _tup_id, stream, direct_task, need_task_ids)
def _emit_terminal_if_needed(self):
Log.info("is_done: %s, tuples_to_complete: %s" % (self.is_done, self.tuples_to_complete))
if self.is_done and self.tuples_to_complete == 0:
Log.info("Emitting terminals to downstream")
super(IntegrationTestSpout, self).emit([integ_const.INTEGRATION_TEST_TERMINAL],
stream=integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)
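# --- Illustrative sketch (not part of the original module) ---
# A hypothetical user spout that IntegrationTestSpout could wrap. The
# constructor takes `delegate` exactly as initialize() above instantiates
# user_spout_cls(delegate=self); emitting through the delegate keeps the
# wrapper's ack bookkeeping and terminal emission working.
class ExampleUserSpout(object):
    def __init__(self, delegate):
        self.delegate = delegate
        self.counter = 0
    def initialize(self, config, context):
        self.counter = 0
    def next_tuple(self):
        # emit one tuple per call through the wrapping spout
        self.counter += 1
        self.delegate.emit([self.counter])
    def ack(self, tup_id):
        pass
    def fail(self, tup_id):
        pass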
|
rsalmaso/django-cms
|
cms/test_utils/project/pluginapp/plugins/style/models.py
|
Python
|
bsd-3-clause
| 1,967
| 0.001525
|
from django.db import models
from cms.models import CMSPlugin
CLASS_CHOICES = ['container', 'content', 'teaser']
CLASS_CHOICES = tuple((entry, entry) for entry in CLASS_CHOICES)
TAG_CHOICES = [
'div', 'article', 'section', 'header', 'footer', 'aside',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6'
]
TAG_CHOICES = tuple((entry, entry) for entry in TAG_CHOICES)
class Style(CMSPlugin):
"""
Renders a given ``TAG_CHOICES`` element with additional attributes
"""
label = models.CharField(
verbose_name='Label',
blank=True,
max_length=255,
help_text='Overrides the display name in the structure mode.',
)
tag_type = models.CharField(
verbose_name='Tag type',
choices=TAG_CHOICES,
default=TAG_CHOICES[0][0],
max_length=255,
)
class_name = models.CharField(
verbose_name='Class name',
choices=CLASS_CHOICES,
default=CLASS_CHOICES[0][0],
blank=True,
max_length=255,
)
additional_classes = models.CharField(
verbose_name='Additional classes',
blank=True,
max_length=255,
)
def __str__(self):
return self.label or self.tag_type or str(self.pk)
def get_short_description(self):
# display format:
# Style label <tag> .list.of.classes #id
display = []
classes = []
if self.label:
display.append(self.label)
if self.tag_type:
display.append('<{0}>'.format(self.tag_type))
|
if self.class_name:
classes.append(self.class_name)
if self.additional_classes:
classes.extend(item.strip() for item in self.addi
|
tional_classes.split(',') if item.strip())
display.append('.{0}'.format('.'.join(classes)))
return ' '.join(display)
def get_additional_classes(self):
return ' '.join(item.strip() for item in self.additional_classes.split(',') if item.strip())
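# --- Illustrative sketch (not part of the original module) ---
# A hypothetical instance showing the short-description format built above:
#   style = Style(label='Hero', tag_type='section', class_name='container',
#                 additional_classes='dark, wide')
#   style.get_short_description()   -> 'Hero <section> .container.dark.wide'
#   style.get_additional_classes()  -> 'dark wide'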
|
t11e/django
|
django/contrib/gis/db/backends/postgis/models.py
|
Python
|
bsd-3-clause
| 2,022
| 0.000989
|
"""
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"""
The 'geometry_columns' table from the PostGIS. See the PostGIS
documentation at Ch. 4.2.2.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the
feature table name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the
feature geometry column.
"""
return 'f_geometry_column'
def __unicode__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See
|
the PostGIS
documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=25
|
6)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
|
JulyKikuAkita/PythonPrac
|
cs15211/ValidAnagram.py
|
Python
|
apache-2.0
| 3,340
| 0.001796
|
__source__ = 'https://leetcode.com/problems/valid-anagram/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/valid-anagram.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 242. Valid Anagram
#
# Given two strings s and t, write a function to
# determine if t is an anagram of s.
#
# For example,
# s = "anagram", t = "nagaram", return true.
# s = "rat", t = "car", return false.
#
# Note:
# You may assume the string contains only lowercase alphabets.
#
# Companies
# Amazon Uber Yelp
# Related Topics
# Hash Table Sort
# Sim
|
ilar Questions
# Group Anagrams Palindrome Permutation Find All Anagrams in a String
#
import unittest
class Solution:
# @param {string} s
# @param {string} t
# @return {b
|
oolean}
def isAnagram(self, s, t):
if len(s) != len(t):
return False
count = {}
for c in s:
if c.lower() in count:
count[c.lower()] += 1
else:
count[c.lower()] = 1
for c in t:
if c.lower() in count:
count[c.lower()] -= 1
else:
count[c.lower()] = -1
if count[c.lower()] < 0:
return False
return True
# Time: O(nlogn)
# Space: O(n)
class Solution2:
# @param {string} s
# @param {string} t
# @return {boolean}
def isAnagram(self, s, t):
return sorted(s) == sorted(t)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
print Solution().isAnagram('a', 'a')
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/valid-anagram/solution/
#
# 4ms 71.69%
class Solution {
public boolean isAnagram(String s, String t) {
if (s.length() != t.length()) {
return false;
}
int[] count = new int[26];
for (int i = 0; i < s.length(); i++) {
count[s.charAt(i) - 'a']++;
}
for (int i = 0; i < t.length(); i++) {
count[t.charAt(i) - 'a']--;
}
for (int i = 0; i < 26; i++) {
if (count[i] != 0) {
return false;
}
}
return true;
}
}
Approach #1 (Sorting) [Accepted]
# Time: O(nlogn)
# Space: O(1)
#7ms 41.66%
class Solution {
public boolean isAnagram(String s, String t) {
if (s.length() != t.length()) {
return false;
}
char[] str1 = s.toCharArray();
char[] str2 = t.toCharArray();
Arrays.sort(str1);
Arrays.sort(str2);
return Arrays.equals(str1, str2);
}
}
# 3ms 81.95%
class Solution {
public boolean isAnagram(String s, String t) {
int [] alp = new int[26];
for(int i = 0;i<s.length();i++) alp[s.charAt(i) - 'a']++;
for(int i = 0;i<t.length();i++) alp[t.charAt(i) - 'a']--;
for(int i : alp) if(i!=0) return false;
return true;
}
}
# 6ms 49.29%
class Solution {
public boolean isAnagram(String s, String t) {
return Arrays.equals(countCharacters(s), countCharacters(t));
}
private int[] countCharacters(String s) {
int[] count = new int[26];
for (int i = 0; i < s.length(); i++) {
count[s.charAt(i) - 'a']++;
}
return count;
}
}
'''
|
mblayman/lcp
|
conductor/accounts/tests/test_forms.py
|
Python
|
bsd-2-clause
| 5,205
| 0.000192
|
from typing import Dict
from unittest import mock
from conductor.accounts.forms import DeactivateForm, SignupForm
from conductor.tests import TestCase
class TestSignupForm(TestCase):
def test_valid(self) -> None:
product_plan = self.ProductPlanFactory.create()
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
self.assertEqual(product_plan, form.product_plan)
def test_required(self) -> None:
product_plan = self.ProductPlanFactory.create()
data: Dict[str, str] = {}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("username", form.errors)
self.assertIn("email", form.errors)
self.assertIn("password", form.errors)
self.assertIn("stripe_token", form.errors)
self.assertNotIn("postal_code", form.errors)
def test_invalid_password(self) -> None:
product_plan = self.ProductPlanFactory.create()
# Test similar username and password to ensure a user instance
# is present and valuable.
data = {
"username": "mattlayman",
"email": "matt@test.com",
"password": "mattlayman",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
|
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("password", form.errors)
def test_unique_email(self) -> None:
product_plan = self.ProductPlanFactory.create()
self.UserFactory.create(email="matt@test.com")
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
|
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("email", form.errors)
def test_unique_username(self) -> None:
product_plan = self.ProductPlanFactory.create()
self.UserFactory.create(username="matt")
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "12345",
}
form = SignupForm(product_plan, data=data)
self.assertFalse(form.is_valid())
self.assertIn("username", form.errors)
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_creates_user(self, stripe_gateway: mock.MagicMock) -> None:
product_plan = self.ProductPlanFactory.create()
stripe_gateway.create_customer.return_value = "cus_1234"
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": "21702",
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertEqual(user.username, "matt")
self.assertEqual(user.email, "matt@test.com")
self.assertEqual(user.profile.postal_code, "21702")
self.assertEqual(user.profile.stripe_customer_id, "cus_1234")
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_missing_postal_code(self, stripe_gateway: mock.MagicMock) -> None:
product_plan = self.ProductPlanFactory.create()
stripe_gateway.create_customer.return_value = "cus_1234"
data = {
"username": "matt",
"email": "matt@test.com",
"password": "asecrettoeverybody",
"stripe_token": "tok_1234",
"postal_code": None,
}
form = SignupForm(product_plan, data=data)
self.assertTrue(form.is_valid())
user = form.save()
self.assertEqual(user.profile.postal_code, "")
class TestDeactivateForm(TestCase):
def test_matching_email(self) -> None:
user = self.UserFactory.create()
data = {"email": user.email}
form = DeactivateForm(user, data=data)
is_valid = form.is_valid()
self.assertTrue(is_valid)
def test_mismatched_email(self) -> None:
user = self.UserFactory.create()
data = {"email": f"nomatch-{user.email}"}
form = DeactivateForm(user, data=data)
is_valid = form.is_valid()
self.assertFalse(is_valid)
self.assertIn("email", form.errors)
@mock.patch("conductor.accounts.forms.stripe_gateway")
def test_save(self, stripe_gateway: mock.MagicMock) -> None:
"""The user subscription gets cancelled and the user is marked inactive."""
user = self.UserFactory.create()
form = DeactivateForm(user)
form.save()
stripe_gateway.cancel_subscription.assert_called_once_with(user)
user.refresh_from_db()
self.assertFalse(user.is_active)
|
LaPingvino/pasportaservo
|
pasportaservo/settings/dev_etenil.py
|
Python
|
agpl-3.0
| 652
| 0
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'pasportaservo',
'USER': 'guillaume',
}
}
LANGUAGE_CODE = 'en'
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
|
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'django_countries',
'phonenumber_field',
'bootstrapform',
'leaflet',
'postm
|
an',
'hosting',
'pages',
'debug_toolbar',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
garyjs/Newfiesautodialer
|
newfies/context_processors.py
|
Python
|
mpl-2.0
| 564
| 0.001773
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not di
|
stributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billin
|
g S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
import newfies
from django.conf import settings
def newfies_version(request):
return {'newfies_version': newfies.__version__, 'SURVEYDEV': settings.SURVEYDEV}
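# --- Illustrative sketch (not part of the original module) ---
# A context processor like the one above is wired up in the project settings;
# the dotted path below is assumed from this file's location and may differ:
#   TEMPLATE_CONTEXT_PROCESSORS += (
#       'newfies.context_processors.newfies_version',
#   )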
|
ianmiell/shutit-openshift-vm
|
airflow.py
|
Python
|
mit
| 4,398
| 0.012051
|
from shutit_module import ShutItModule
import base64
class openshift_airflow(ShutItModule):
def build(self, shutit):
shutit.send('cd /tmp/openshift_vm')
shutit.login(command='vagrant ssh')
shutit.login(command='sudo su -',password='vagrant',note='Become root (there is a problem logging in as admin with the vagrant user)')
# AIRFLOW BUILD
# Takes too long.
#shutit.send('oc describe buildconfig airflow',note='Ideally you would take this github url, and update your github webhooks for this project. But there is no public URL for this server so we will skip and trigger a build manually.')
#shutit.send('oc start-build airflow',note='Trigger a build by hand')
#shutit.send('sleep 60 && oc logs -f build/airflow-1',note='Follow the build and wait for it to terminate')
# IMAGE STREAM
shutit.send_file('/tmp/imagestream.json','''
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
}''')
shutit.send('oc create -f /tmp/imagestream.json')
# BUILD CONFIG
shutit.send_file('secret.json','''{
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mysecret"
},
"namespace": "user2",
"data": {
"username": "''' + base64.b64encode('myusername') + '''"
}
}''')
shutit.send('oc create -f secret.json')
shutit.send_file('/tmp/buildconfig.json','''
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow",
"labels": {
"name": "airflow-build"
}
},
"spec": {
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/ianmiell/shutit-airflow"
}
},
"strategy": {
"type": "Docker"
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
}
},
"volumes": {
"name": "secvol",
"secret": {
"secretname": "mysecret"
}
}
}
}
''')
shutit.send('oc create -f /tmp/buildconfig.json')
# DEPLOYMENT CONFIG
shutit.send_file('/tmp/deploymentconfig.json','''
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "airflow"
|
},
"spec": {
"strategy": {
"type": "Rolling",
"rollingParams": {
"updatePeriodSeconds": 1,
"intervalSeconds": 1,
"timeoutSeconds": 120
},
"resources": {}
},
"triggers": [
|
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"nodejs-helloworld"
],
"from": {
"kind": "ImageStreamTag",
"name": "airflow:latest"
}
}
},
{
"type": "ConfigChange"
}
],
"replicas": 1,
"selector": {
"name":"airflow"
},
"template": {
"metadata": {
"labels": {
"name": "airflow"
}
},
"spec": {
"containers": [
{
"name": "airflow",
"image": "airflow",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent",
"securityContext": {
"capabilities": {},
"privileged": false
}
}
],
"restartPolicy": "Always",
"dnsPolicy": "ClusterFirst"
}
}
},
"status": {}
}
''')
shutit.send('oc create -f /tmp/deploymentconfig.json')
shutit.logout()
shutit.logout()
return True
def module():
return openshift_airflow(
'shutit.openshift_vm.openshift_vm.openshift_airflow', 1418326706.005,
description='',
maintainer='',
delivery_methods=['bash'],
depends=['shutit.openshift_vm.openshift_vm.openshift_vm']
)
|
vonivgol/pyreminder
|
src/main.py
|
Python
|
gpl-2.0
| 1,723
| 0.002902
|
from tkinter import *
from gui import GUI
from reminder import Reminder
import argparse
import time
if __name__ == '__main__':
print("""
Copyright (C) 2016 Logvinov Dima.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
""")
parser = argparse.ArgumentParser(description="PyReminder - python reminder app for ElementaryOS.")
parser.add_argument('--add', help="Add new event.\n"
"$ pyreminder --add 'Event text:Event time hours.minutes.day.month.year'", type=str)
parser.add_argument('--list', help="Print list of events.", action="store_true")
parser.add_argument('--delete', help="Delete event.\n "
"$ pyreminder --delete
|
event_id ", type=int)
parser.add_argument('--gui', help="Run gui program.", action="store_true")
args = parser.parse_args()
reminder = Reminder()
if args.gui:
root = Tk()
root.geometry("500x200+350+500")
app = GUI(root, reminder)
root.mainloop()
if args.add:
event_text, event_date = args.add.split(":")
reminder.add_task(event_date, event_text)
|
reminder.update_db()
if args.list:
tasks = reminder.get_tasks_list()
if len(tasks) > 0:
for task_id in range(0, len(tasks)):
task_id = str(task_id)
print("id:{0} time:{1} text:{2}".
format(task_id, tasks[task_id][0], tasks[task_id][1]))
if args.delete:
if not reminder.delete_task(str(args.delete)):
print("Task: {} not found.".format(str(args.delete)))
|
kowey/attelo
|
attelo/io.py
|
Python
|
gpl-3.0
| 11,402
| 0
|
"""
Saving and loading data or models
"""
from __future__ import print_function
from itertools import chain
import codecs
import copy
import csv
import json
import sys
import time
import traceback
import joblib
from sklearn.datasets import load_svmlight_file
from .edu import (EDU, FAKE_ROOT_ID, FAKE_ROOT)
from .table import (DataPack, DataPackException,
UNKNOWN, UNRELATED,
get_label_string, groupings)
from .util import truncate
# pylint: disable=too-few-public-methods
class IoException(Exception):
"""
Exceptions related to reading/writing data
"""
def __init__(self, msg):
super(IoException, self).__init__(msg)
# ---------------------------------------------------------------------
# feedback
# ---------------------------------------------------------------------
# pylint: disable=redefined-builtin, invalid-name
class Torpor(object):
"""
Announce that we're about to do something, then do it,
then say we're done.
Usage: ::
with Torpor("doing a slow thing"):
some_slow_thing
Output (1): ::
doing a slow thing...
Output (2a): ::
doing a slow thing... done
Output (2b): ::
doing a slow thing... ERROR
<stack trace>
:param quiet: True to skip the message altogether
"""
def __init__(self, msg,
sameline=True,
quiet=False,
file=sys.stderr):
self._msg = msg
self._file = file
self._sameline = sameline
self._quiet = quiet
self._start = 0
self._end = 0
def __enter__(self):
# we grab the wall time instead of using time.clock() (A)
        # because we are not using this for profiling but just to
# get a rough idea what's going on, and (B) because we want
# to include things like IO into the mix
self._start = time.time()
if self._quiet:
return
elif self._sameline:
print(self._msg, end="... ", file=self._file)
else:
print("[start]", self._msg, file=self._file)
def __exit__(self, type, value, tb):
self._end = time.time()
if tb is None:
if not self._quiet:
done = "done" if self._sameline else "[-end-] " + self._msg
ms_elapsed = 1000 * (self._end - self._start)
final_msg = u"{} [{:.0f} ms]".format(done, ms_elapsed)
print(final_msg, file=self._file)
else:
if not self._quiet:
oops = "ERROR!" if self._sameline else "ERROR! " + self._msg
print(oops, file=self._file)
traceback.print_exception(type, value, tb)
sys.exit(1)
# pylint: redefined-builtin, invalid-name
# ---------------------------------------------------------------------
# tables
# ---------------------------------------------------------------------
def load_edus(edu_file):
"""
Read EDUs (see :doc:`../input`)
:rtype: [EDU]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_edu(row):
'interpret a single row'
expected_len = 6
if len(row) != expected_len:
oops = ('This row in the EDU file {efile} has {num} '
'elements instead of the expected {expected}: '
'{row}')
raise IoException(oops.format(efile=edu_file,
num=len(row),
expected=expected_len,
row=row))
[global_id, txt, grouping, subgrouping, start_str, end_str] = row
start = int(start_str)
end = int(end_str)
return EDU(global_id,
txt.decode('utf-8'),
start,
end,
grouping,
subgrouping)
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_edu(r) for r in reader if r]
def load_pairings(edu_file):
"""
Read and return EDU pairings (see :doc:`../input`).
We assume the order is parent, child
:rtype: [(string, string)]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_pair(row):
'interpret a single row'
if len(row) < 2 or len(row) > 3:
oops = ('This row in the pairings file {efile} has '
'{num} elements instead of the expected 2 or 3')
            raise IoException(oops.format(efile=edu_file,
num=len(row),
row=row))
return tuple(row[:2])
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_pair(r) for r in reader if r]
def load_labels(feature_file):
"""
Read the very top of a feature file and read the labels comment,
return the sequence of labels, else return None
:rtype: [string] or None
"""
with codecs.open(feature_file, 'r', 'utf-8') as stream:
line = stream.readline()
if line.startswith('#'):
seq = line[1:].split()
if seq[0] == 'labels:':
return seq[1:]
# fall-through case, no labels found
return None
def _process_edu_links(edus, pairings):
"""
Convert from the results of :py:method:load_edus: and
:py:method:load_pairings: to a sequence of edus and pairings
respectively
:rtype: ([EDU], [(EDU,EDU)])
"""
edumap = {e.id: e for e in edus}
enames = frozenset(chain.from_iterable(pairings))
if FAKE_ROOT_ID in enames:
edus2 = [FAKE_ROOT] + edus
edumap[FAKE_ROOT_ID] = FAKE_ROOT
else:
edus2 = copy.copy(edus)
naughty = [x for x in enames if x not in edumap]
if naughty:
oops = ('The pairings file mentions the following EDUs but the EDU '
'file does not actually include EDUs to go with them: {}')
raise DataPackException(oops.format(truncate(', '.join(naughty),
1000)))
pairings2 = [(edumap[e1], edumap[e2]) for e1, e2 in pairings]
return edus2, pairings2
def load_multipack(edu_file, pairings_file, feature_file, vocab_file,
verbose=False):
"""
Read EDUs and features for edu pairs.
Perform some basic sanity checks, raising
:py:class:`IoException` if they should fail
:rtype: :py:class:`Multipack` or None
"""
vocab = load_vocab(vocab_file)
with Torpor("Reading edus and pairings", quiet=not verbose):
edus, pairings = _process_edu_links(load_edus(edu_file),
load_pairings(pairings_file))
with Torpor("Reading features", quiet=not verbose):
labels = [UNKNOWN] + load_labels(feature_file)
# pylint: disable=unbalanced-tuple-unpacking
data, targets = load_svmlight_file(feature_file,
n_features=len(vocab))
# pylint: enable=unbalanced-tuple-unpacking
with Torpor("Build data packs", quiet=not verbose):
dpack = DataPack.load(edus, pairings, data, targets,
labels, vocab)
return {k: dpack.selected(idxs) for
k, idxs in groupings(pairings).items()}
def load_vocab(filename):
"read feature vocabulary"
features = []
with codecs.open(filename, 'r', 'utf-8') as stream:
for line in stream:
features.append(line.split('\t')[0])
return features
# ---------------------------------------------------------------------
# predictions
# ---------------------------------------------------------------------
def write_predictions_output(dpack, predicted, filename):
"""
Write predictions to an output file whose format
is documented in :doc:`../output`
"""
links = {}
for edu1, edu2, label in predicted:
links[(edu1, edu2)] = label
def mk_row(edu1, edu2):
'return
|
perlygatekeeper/glowing-robot
|
google_test/bunny_escape/bunnyEscape_fixed.py
|
Python
|
artistic-2.0
| 3,411
| 0.013193
|
def printMap(the_map,note):
print(note)
for row in the_map:
row_str = ""
for cell in row:
row_str += " {0:3d}".format(cell)
print(row_str)
def pathFinder(x, y, the_map, steps, lastX, lastY, wall):
# count possible moves
debug = False
options = []
if x-1 >= 0: # East
options.append([-1, 0])
if x+1 <= lastX: # West
options.append([ 1, 0])
if y-1 >= 0: # North
options.append([ 0,-1])
if y+1 <= lastY: # South
options.append([ 0, 1])
# increment step
steps += 1
if debug:
printMap(the_map,"({0:2d},{1:2d}) steps:{2:3d} {3:6} before options ---------------------------------".format(x,y,steps,wall))
for option in options:
# new x and y
newX = x + option[0]
# print("x({0:2d}) + option[0]({1:2d}) -> newX({2:2d})".format(x,option[0],newX) )
newY = y + option[1]
# print("y({0:2d}) + option[1]({1:2d}) -> newY({2:2d})".format(y,option[1],newY) )
if debug:
print(" looking at ({0:2d},{1:2d}) with value={2:2d} and with steps:{3:3d} {4:6} from ({5:2d},{6:2d})".format(newX,newY,the_map[newY][newX],steps,wall,x,y))
# if statements
if the_map[newY][newX] == 0:
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
elif the_map[newY][newX] > 1 and steps <= the_map[newY][newX]:
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
elif ( the_map[newY][newX] == 1 or the_map[newY][newX] < 0 ) and not wall and (newX != lastX or newY != lastY):
if debug:
print("Removing a wall at {0:2d}:{1:2d}".format(newX,newY))
wall = True
the_map[newY][newX] = steps * -1
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
wall = False
elif the_map[newY][newX] > 1 and steps < abs(the_map[newY][newX]):
if(the_map[newY][newX] < 0):
the_map[newY][newX] = steps * -1
if(the_map[newY][newX] > 0):
the_map[newY][newX] = steps
if newX != 0 or newY != 0:
pathFinder(newX, newY, the_map, steps, lastX, lastY, wall)
if debug:
printMap(the_map,"({0:2d},{1:2d}) steps:{2:3d} {3:6} after options ---------------------------------".format(x,y,steps,wall))
def solution(the_map):
debug = False
steps = 1
lastX = len(the_map[0]) - 1
lastY = len(the_map) - 1
x = lastX
y = lastY
testMap = the_map[:]
testMap[y][x] = 1
pathFinder(x, y, testMap, steps, lastX, lastY, False)
if debug:
printMap(the_map,"All done. {0:3d} ------------------------------".format(testMap[0][0]))
return(testMap[0][0])
#print(solution([[0, 1], [0, 0]]))
#print(solution([[0, 1, 1, 0], [0, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]))
print(solution([[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
#print(solution([[0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]))
|
robocomp/robocomp
|
tools/rcmonitor/examples/pyramidRoiRGB.py
|
Python
|
gpl-3.0
| 2,369
| 0.01984
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, sys, math, traceback
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.ic = Ice.initialize(sys.argv)
self.mods = modules
        print('Endpoint', endpoint)
self.prx = self.ic.stringToProxy(endpoint)
        self.proxy = self.mods['RoboCompRoimant'].RoimantPrx.checkedCast(self.prx)
self.leftPyrList = []
self.rightPyrList = []
        for level in range(4):
self.leftPyrList.append(None)
self.rightPyrList.append(None)
self.wdth = self.proxy.getRoiParams().width
self.hght = self.proxy.getRoiParams().height
self.job()
def job(self):
output = self.proxy.getBothPyramidsRGBAndLeftROIList()
pos=0
size=self.wdth*self.hght*3
for level in range(4):
self.leftPyrList[level] = output[0][pos:pos+size]
self.rightPyrList[level] = output[2][pos:pos+size]
pos = pos + size
size = size/4
def paintEvent(self, event=None):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
xPos = -self.wdth/2
yPos = self.height()
for level in range(len(self.leftPyrList)):
xPos = xPos + (self.wdth/2)/(2**level)
yPos = yPos - self.hght/(2**level)
qimage = QImage(self.leftPyrList[level], self.wdth/(2**level), self.hght/(2**level), QImage.Format_RGB888);
painter.drawImage(QPointF(xPos, yPos), qimage)
qimage = QImage(self.rightPyrList[level], self.wdth/(2**level), self.hght/(2**level), QImage.Format_RGB888);
painter.drawImage(QPointF(xPos+self.wdth, yPos), qimage)
painter.end()
painter = None
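A small standalone sketch of the byte offsets job() walks through when slicing the flat RGB buffer; the 120x120 dimensions are only an assumed example, since the real values come from proxy.getRoiParams():

width, height = 120, 120      # assumed example dimensions
size = width * height * 3     # bytes in pyramid level 0 (RGB)
pos = 0
for level in range(4):
    print("level %d starts at byte %d and spans %d bytes" % (level, pos, size))
    pos += size
    size //= 4                # each level halves both image dimensions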
|
observerss/yamo
|
yamo/document.py
|
Python
|
mit
| 10,377
| 0.000096
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from functools import partialmethod
from pymongo.operations import UpdateOne, InsertOne
from .cache import CachedModel
from .errors import ConfigError, ArgumentError
from .metatype import DocumentType, EmbeddedDocumentType
from .fields import EmbeddedField
log = logging.getLogger('yamo')
class classproperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
class MongoOperationMixin(object):
""" Mongodb raw operations """
@classmethod
def run_command(cls, *args, **kwargs):
cmd = kwargs['cmd']
del kwargs['cmd']
return getattr(cls._coll, cmd)(*args, **kwargs)
for cmd in [
'insert_one', 'insert_many',
'find', 'find_one', 'find_one_and_delete',
'find_one_and_replace', 'find_one_and_update',
'update_one', 'update_many', 'replace_one',
'delete_one', 'delete_many',
'create_index', 'create_indexes', 'reindex',
'index_information', 'list_indexes',
'drop', 'drop_index', 'drop_indexes',
'aggregate', 'group', 'inline_map_reduce', 'map_reduce',
'bulk_write',
'initialize_ordered_bulk_op', 'initialize_unordered_bulk_op',
'rename', 'count', 'distinct', 'options', 'with_options',
]:
locals()[cmd] = partialmethod(run_command, cmd=cmd)
class InitMixin(object):
def __init__(self, data=None):
self._refs = {}
self._data = {}
self._defaults = {}
if data:
for name, field in self._fields.items():
if name in data:
value = data[name]
else:
value = field.default
if callable(value):
value = value()
if value is not None:
self._defaults[name] = value
value = field.to_storage(value)
self._data[name] = value
class ValidationMixin(object):
def validate(self):
for name, field in self._fields.items():
if name in self._data:
value = field.to_python(self._data[name])
field.validate(value)
def to_dict(self):
d = {}
for name, field in self._fields.items():
value = field.to_python(self._data.get(name))
if isinstance(value, list):
ovalue, value = value, []
for v in ovalue:
if isinstance(v, EmbeddedDocument):
v = v.to_dict()
value.append(v)
d[name] = value
return d
class MetaMixin(object):
""" helper methods for "Meta" info """
@classproperty
def unique_fields(cls):
names = set()
for idx in cls.Meta._indexes or []:
if idx.kwargs.get('unique'):
for key in idx.keys:
if isinstance(key, tuple):
names.add(key[0])
else:
names.add(key)
return names
@classmethod
def prepare(cls):
cls.ensure_indexes()
cls.ensure_shards()
@classmethod
def ensure_indexes(cls):
allowed_keys = set(['name', 'unique', 'background', 'sparse',
'bucketSize', 'min', 'max', 'expireAfterSeconds'])
for idx in cls.Meta._indexes or []:
if set(idx.kwargs.keys()) - allowed_keys:
raise ArgumentError(MetaMixin.ensure_indexes, idx.kwargs)
cls._coll.create_index(idx.keys, **idx.kwargs)
@classmethod
def ensure_shards(cls):
if cls.Meta._shardkey:
admin = cls._conn.admin
dbname = cls._db.name
try:
admin.command('enableSharding', dbname)
except Exception as e:
                if 'already' in str(e):
try:
admin.command(
'shardCollection',
'{}.{}'.format(dbname,
cls.Meta.__collection__),
key=cls.Meta._shardkey.key)
except Exception as e:
                        if 'already' not in str(e):
log.warning('shard collection failed: '
'{}'.format(str(e)))
else:
log.warning('enable shard failed: '
'{}'.format(str(e)))
class MapperMixin(object):
""" ORM only method mixins """
def refresh(self):
_id = self._data.get('_id')
self._data = {}
if _id:
doc = self._coll.find_one({'_id': _id})
if doc:
self._data = doc
self.validate()
@classmethod
    def query(cls, *args, **kwargs):
        """ Same as collection.find, but return Document rather than dict """
for doc in cls._coll.find(*args, **kwargs):
yield cls.from_storage(doc)
@classmethod
    def query_one(cls, *args, **kwargs):
        """ Same as collection.find_one, but return Document rather than dict """
doc = cls._coll.find_one(*args, **kwargs)
if doc:
return cls.from_storage(doc)
def update(self, update):
""" Update self """
        self._coll.update_one({'_id': self._data['_id']},
                              update)
def upsert(self, null=False):
""" Insert or Update Document
:param null: whether update null values
Wisely select unique field values as filter,
Update with upsert=True
"""
self._pre_save()
self.validate()
filter_ = self._upsert_filter()
if filter_:
update = self._upsert_update(filter_, null)
if update['$set']:
r = self._coll.find_one_and_update(filter_, update,
upsert=True, new=True)
self._data['_id'] = r['_id']
else:
r = self._coll.insert_one(self._data)
self._data['_id'] = r.inserted_id
def save(self):
self._pre_save()
self._ensure_id()
self.validate()
if '_id' in self._data:
doc = self._data.copy()
del doc['_id']
self._coll.update_one({'_id': self._data['_id']},
{'$set': doc},
upsert=True)
else:
self._coll.insert_one(self._data)
@classmethod
def bulk_upsert(cls, docs, null=False):
if len(docs) == 0:
return 0
requests = []
for doc in docs:
if not isinstance(doc, cls):
raise ArgumentError(cls, docs)
doc._pre_save()
doc.validate()
filter_ = doc._upsert_filter()
if filter_:
update = doc._upsert_update(filter_, null)
if update['$set']:
requests.append(UpdateOne(filter_, update, upsert=True))
else:
requests.append(InsertOne(doc._data))
r = cls._coll.bulk_write(requests, ordered=False)
return r.upserted_count
def remove(self):
_id = self._ensure_id()
if _id:
self._coll.delete_one({'_id': _id})
else:
log.warning("This document has no _id, it can't be deleted")
@classmethod
def cached(cls, timeout=60, cache_none=False):
""" Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
"""
return CachedModel(cls=cls, timeout=timeout, cache_none=cache_none)
def _pre_save(self):
for name, field in self._fields.items():
value = field.pre_save_val(self._data.get(name))
if value:
setattr(self, name, value)
if not field.required and name in self._data \
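A minimal sketch (plain pymongo, outside yamo) of the request that upsert() and bulk_upsert() assemble: filter on an assumed unique field, $set the remaining values, and rely on upsert=True to insert when nothing matches:

from pymongo.operations import UpdateOne

# 'email' is a hypothetical unique field; the values are placeholders.
filter_ = {'email': 'alice@example.com'}
update = {'$set': {'name': 'Alice', 'age': 30}}
request = UpdateOne(filter_, update, upsert=True)
# Requests like this are then passed to collection.bulk_write([...], ordered=False).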
|
sasha-gitg/python-aiplatform
|
samples/snippets/job_service/cancel_data_labeling_job_sample.py
|
Python
|
apache-2.0
| 1,485
| 0.001347
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_cancel_data_labeling_job_sample]
from google.cloud import aiplatform
def cancel_data_labeling_job_sample(
project: str,
data_labeling_job_id: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    # The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
name = client.data_labeling_job_path(
project=project, location=location, data_labeling_job=data_labeling_job_id
)
response = client.cancel_data_labeling_job(name=name)
print("response:", response)
# [END aiplatform_cancel_data_labeling_job_sample]
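A possible invocation of the sample, with placeholder identifiers (a real call also needs valid Google Cloud credentials):

cancel_data_labeling_job_sample(
    project="my-gcp-project",            # placeholder project id
    data_labeling_job_id="1234567890",   # placeholder job id
)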
|
agentOfChaos/brainPizza
|
brainpizza.py
|
Python
|
gpl-2.0
| 3,474
| 0.004893
|
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
import copy
from PIL import Image
import os
import random
import time
import math
imagesize = (120, 120)
peak = 100
gusti = ["margherita", "crudo", "funghi", "salame", "rucola", "4formaggi", "americana"]
def buildnet():
inputs = len(gusti)
outputs = imagesize[0] * imagesize[1] * 3 # R G B
hiddens = (120 * 3) # lol, I have no idea
return buildNetwork(inputs, hiddens, outputs)
def getSwitchTuple(index, lengt, disturb=0):
ret = []
for i in range(lengt):
if i == index:
ret.append((1.0 + disturb) * peak)
else:
ret.append(0.0)
return tuple(ret)
def buildtrainset():
inputs = len(gusti)
outputs = imagesize[0] * imagesize[1] * 3
ds = SupervisedDataSet(inputs, outputs)
for gusto in gusti:
indice = gusti.index(gusto)
pizzaset = os.listdir("./pizze/" + gusto + "/")
print("Training set for gusto: %s (%s)" % (gusto, ",".join(map(str, getSwitchTuple(indice, inputs)))))
for pizzaname in pizzaset:
pizza = "./pizze/" + gusto + "/" + pizzaname
print(" Training with %s" % pizza, end=" ")
            ds.addSample(getSwitchTuple(indice, inputs, disturb=random.uniform(-0.3, 0.3)), processImg(pizza))
print("done")
return ds
def outimage(outtuple, name):
img = Image.new('RGB', imagesize, "white")
pixels = img.load()
for i in range(img.size[0]):
for j in range(img.size[1]):
tup_index = (i*img.size[0] + j) * 3
            pixels[i,j] = (int(outtuple[tup_index]), int(outtuple[tup_index + 1]), int(outtuple[tup_index + 2]))
img.save(name)
#img.show()
def calcETA(timestep, remaining):
totsec = timestep * remaining
totmin = math.floor(totsec / 60)
remsec = totsec - (totmin * 60)
return totmin, remsec
def letsrock(rounds=25):
minimum = 999999999999
bestnet = None
print("Initializing neural network...")
net = buildnet()
print("Building training set...")
trset = buildtrainset()
trainer = BackpropTrainer(net, trset)
started = time.time()
for i in range(rounds):
print("training: %d%%... " % ((i*100) / rounds), end="")
err = trainer.train()
timestep = (time.time() - started) / (i+1)
min, sec = calcETA(timestep, rounds - i - 1)
if err < minimum:
minimum = err
bestnet = copy.deepcopy(net)
print("error: %.05f - ETA: %02d:%02d" % (err, min, sec), end="\r")
#trainer.trainUntilConvergence(verbose=True)
print("training: complete! ")
return bestnet
def fullShow():
net = letsrock()
for gusto in gusti:
print("Creating pizza, gusto: %s" % gusto)
indice = gusti.index(gusto)
activ = getSwitchTuple(indice, len(gusti))
name = "oven/" + gusto + ".jpg"
rgb = net.activate(activ)
datum = list(rgb)
outimage(datum, name)
def processImg(filename):
img = Image.open(filename)
img = img.resize(imagesize, Image.ANTIALIAS)
rgb_img = img.convert('RGB')
pixels = []
for x in range(imagesize[0]):
for y in range(imagesize[1]):
tup = tuple(rgb_img.getpixel((x, y)))
pixels.extend(tup)
return tuple(pixels)
if __name__ == "__main__":
fullShow()
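A tiny self-contained check (using a synthetic 2x2 image rather than one of the pizza photos) of the flattening convention processImg uses and outimage reverses: consecutive (R, G, B) triples in the same nested x/y loop order:

from PIL import Image

tiny = Image.new('RGB', (2, 2), (10, 20, 30))
flat = []
for x in range(2):
    for y in range(2):
        flat.extend(tiny.getpixel((x, y)))
assert flat == [10, 20, 30] * 4   # four pixels, three channels each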
|
lukeburden/django-allauth
|
allauth/socialaccount/providers/ynab/provider.py
|
Python
|
mit
| 852
| 0
|
from allauth.socialaccount.providers.base import AuthAction, ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class Scope(object):
ACCESS = 'read-only'
class YNABAccount(ProviderAccount):
pass
class YNABProvider(OAuth2Provider):
id = 'ynab'
name = 'YNAB'
account_class = YNABAccount
def get_default_scope(self):
scope = [Scope.ACCESS]
return scope
    def get_auth_params(self, request, action):
ret = super(YNABProvider, self).get_auth_params(request,
action)
if action == AuthAction.REAUTHENTICATE:
ret['prompt'] = 'select_account consent'
return ret
def extract_uid(self, data):
return str(data['data']['user']['id'])
provider_classes = [YNABProvider]
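A hypothetical Django settings snippet showing how a project could override the provider's default scope, assuming the standard allauth per-provider SCOPE setting applies here as well:

SOCIALACCOUNT_PROVIDERS = {
    'ynab': {
        'SCOPE': ['read-only'],
    }
}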
|
HealthCatalystSLC/healthcareai-py
|
healthcareai/common/database_library_validators.py
|
Python
|
mit
| 626
| 0.00639
|
import sys
from healthcareai.common.healthcareai_error import HealthcareAIError
def validate_pyodbc_is_loaded():
    """ Simple check that alerts the user if they do not have pyodbc installed, which is not a requirement. """
    if 'pyodbc' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of pyodbc.')
def validate_sqlite3_is_loaded():
    """ Simple check that alerts the user if they do not have sqlite3 installed, which is not a requirement. """
if 'sqlite3' not in sys.modules:
raise HealthcareAIError('Using this function requires installation of sqlite3.')
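A minimal usage sketch, assuming the caller imports the optional driver first so that it shows up in sys.modules:

import sqlite3  # importing the optional dependency registers it in sys.modules

try:
    validate_sqlite3_is_loaded()
    print('sqlite3 is available')
except HealthcareAIError as error:
    print(error)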
|
weld-project/weld
|
weld-python/tests/grizzly/core/test_frame.py
|
Python
|
bsd-3-clause
| 3,167
| 0.005052
|
"""
Test basic DataFrame functionality.
"""
import pandas as pd
import pytest
import weld.grizzly as gr
def get_frames(cls, strings):
"""
Returns two DataFrames for testing binary operators.
The DataFrames have columns of overlapping/different names, types, etc.
"""
df1 = pd.DataFrame({
'name': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'Smith', 'Narayanan', 'Thomas', 'Thaker'],
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = pd.DataFrame({
'firstName': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'smith', 'narayanan', 'Thomas', 'thaker'],
'age': [25, 30, 45, 20, 60, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
if not strings:
df1 = df1.drop(['name', 'lastName'], axis=1)
df2 = df2.drop(['firstName', 'lastName'], axis=1)
return (cls(df1), cls(df2))
def _test_binop(pd_op, gr_op, strings=True):
"""
Test a binary operator.
Binary operators align on column name. For columns that don't exist in both
DataFrames, the column is filled with NaN (for non-comparison operations) and
or False (for comparison operations).
If the RHS is a Series, the Series should be added to all columns.
"""
df1, df2 = get_frames(pd.DataFrame, strings)
gdf1, gdf2 = get_frames(gr.GrizzlyDataFrame, strings)
expect = pd_op(df1, df2)
result = gr_op(gdf1, gdf2).to_pandas()
assert expect.equals(result)
def test_evaluation():
# Test to make sure that evaluating a DataFrame once caches the result/
# doesn't cause another evaluation.
df1 = gr.GrizzlyDataFrame({
        'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df3 = (df1 + df2) * df2 + df1 / df2
assert not df3.is_value
df3.evaluate()
assert df3.is_value
weld_value = df3.weld_value
df3.evaluate()
# The same weld_value should be returned.
assert weld_value is df3.weld_value
def test_add():
_test_binop(pd.DataFrame.add, gr.GrizzlyDataFrame.add, strings=False)
def test_sub():
_test_binop(pd.DataFrame.sub, gr.GrizzlyDataFrame.sub, strings=False)
def test_mul():
_test_binop(pd.DataFrame.mul, gr.GrizzlyDataFrame.mul, strings=False)
def test_div():
_test_binop(pd.DataFrame.div, gr.GrizzlyDataFrame.div, strings=False)
def test_eq():
_test_binop(pd.DataFrame.eq, gr.GrizzlyDataFrame.eq, strings=True)
def test_ne():
_test_binop(pd.DataFrame.ne, gr.GrizzlyDataFrame.ne, strings=True)
def test_le():
_test_binop(pd.DataFrame.le, gr.GrizzlyDataFrame.le, strings=False)
def test_lt():
_test_binop(pd.DataFrame.lt, gr.GrizzlyDataFrame.lt, strings=False)
def test_ge():
_test_binop(pd.DataFrame.ge, gr.GrizzlyDataFrame.ge, strings=False)
def test_gt():
_test_binop(pd.DataFrame.gt, gr.GrizzlyDataFrame.gt, strings=False)
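A small pandas-only illustration (no Grizzly required) of the alignment behaviour described in the _test_binop docstring: columns missing from one frame come back filled with NaN for arithmetic operations:

import pandas as pd

a = pd.DataFrame({'age': [1, 2], 'score': [3.0, 4.0]})
b = pd.DataFrame({'age': [10, 20], 'scores': [5.0, 6.0]})
print(a.add(b))   # 'age' is summed; 'score' and 'scores' are all NaN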
|
jgillis/casadi
|
test/python/sdp.py
|
Python
|
lgpl-3.0
| 12,021
| 0.049746
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
from casadi.tools import *
import casadi as c
from numpy import *
import unittest
from types import *
from helpers import *
class SDPtests(casadiTestCase):
@requires("DSDPSolver")
def test_memleak1(self):
self.message("memleak1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
A = vertcat([DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])])
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
@requires("DSDPSolver")
def test_memleak2(self):
self.message("memleak1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
A = vertcat([DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])])
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
@requires("DSDPSolver")
def test_scalar(self):
self.message("scalar")
#
# min n1*x
# x
# n3*x-n2>=0
#
# -> x = n2/n3
#
# 1 active constraint, cost: d(n1*x)/d(n*x-n2) = 1/[d(n3*x-n2)/d(n1*x)] = n1/n3
n1 = 3.1
n2 = 2.3
n3 = 4.7
b = DMatrix(n1)
Ai = [DMatrix(n3)]
A = vertcat(Ai)
makeSparse(A)
C = DMatrix(n2)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("primal"),DMatrix(n2/n3),digits=5)
self.checkarray(dsp.output("p"),DMatrix(0),digits=5)
self.checkarray(dsp.output("dual"),DMatrix(n1/n3),digits=5)
@requires("DSDPSolver")
def test_linear_equality(self):
self.message("linear equality")
# min n1*x
# x
#
# n3*x-n2 >= 0 |__ n3*x == n2
# -(n3*x-n2) >= 0 |
#
# solution: x=n2/n3
n3 = 1.7
n1 = 2.1
n2 = 1.3
b = DMatrix([n1])
Ai = [ blkdiag([n3,-n3])]
C = blkdiag([n2,-n2])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(n1*n2/n3),digits=5)
    self.checkarray(dsp.output("dual_cost"),DMatrix(n1*n2/n3),digits=5)
self.checkarray(dsp.output("primal"),DMatrix(n2/n3),digits=5)
self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
self.checkarray(dsp.output("dual")[0,0]-dsp.output("dual")[1,1],DMatrix(n1/n3),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation1(self):
self.message("linear interpolation1")
# min 2*x0 + x1*3
# x0,x1
    #  x0+x1 - 1 >=0  -->  x0+x1>=1
# x0 >=0
# x1 >=0
#
# solution: x0=1, x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([1,1,0]), blkdiag([1,0,1])]
C = blkdiag([1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[0,0,0],[0,1,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[2,0,0],[0,0,0],[0,0,1]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation2(self):
self.message("linear interpolation2")
# min 2*x0 + 3*x1
# x0,x1
# -(x0 + x1 -1) >=0 --> x0 + x1 <= 1
# x0 >=0
# x1 >=0
#
# solution: x0=0 , x1=0
b = DMatrix([2,3])
Ai = [ blkdiag([-1,1,0]), blkdiag([-1,0,1])]
C = blkdiag([-1,0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(0),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([0,0]),digits=5)
self.checkarray(dsp.output("p"),DMatrix([[1,0,0],[0,0,0],[0,0,0]]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[0,0,0],[0,2,0],[0,0,3]]),digits=5)
@requires("DSDPSolver")
def test_linear_interpolation(self):
self.message("linear interpolation")
# min 2*a + (1-a)*4
# a
# 0 <= a <= 1
#
# Translates to:
# min 2*x0 + 4*x1
# x0,x1
# x0 + x1 -1 >= 0 |__ x0 + x1 == 1
# -(x0 + x1 -1) >= 0 |
# x0 >= 0
# x1 >= 0
b = DMatrix([2,4])
Ai = [ blkdiag([1,-1,1,0]), blkdiag([1,-1,0,1])]
e = 1e-6
C = blkdiag([1,-(1+e),0,0])
A = vertcat(Ai)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(2),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([1,0]),digits=5)
self.checkarray(dsp.output("p"),diag([0,0,1,0]),digits=5)
self.checkarray(dsp.output("dual"),diag([2,0,0,2]),digits=2)
@requires("DSDPSolver")
def test_example1(self):
self.message("Example1")
# Originates from http://sdpa.indsys.chuo-u.ac.jp/sdpa/files/sdpa-c.6.2.0.manual.pdf
b = DMatrix([48,-8,20])
Ai = [DMatrix([[10,4],[4,0]]),DMatrix([[0,0],[0,-8]]),DMatrix([[0,-8],[-8,-2]])]
A = vertcat(Ai)
makeSparse(A)
A.printMatrix()
C = DMatrix([[-11,0],[0,23]])
makeSparse(C)
dsp = DSDPSolver(C.sparsity(),A.sparsity())
dsp.init()
dsp.input("c").set(C)
dsp.input("b").set(b)
dsp.input("a").set(A)
dsp.evaluate()
self.checkarray(dsp.output("primal_cost"),DMatrix(-41.9),digits=5)
self.checkarray(dsp.output("dual_cost"),DMatrix(-41.9),digits=5)
self.checkarray(dsp.output("primal"),DMatrix([-1.1,-2.7375,-0.55]),digits=5)
self.checkarray(dsp.output("dual"),DMatrix([[5.9,-1.375],[-1.375,1]]),digits=5)
self.checkarray(dsp.output("p"),DMatrix.zeros(2,2),digits=5)
V = struct_ssym([
entry("L",shape=C.shape),
entry("x",shape=b.size())
])
L = V["L"]
x = V["x"]
P = mul(L,L.T)
g = []
g.append(sum([Ai[i]*x[i] for i in range(3)]) - C - P)
f = SXFunction([V],[mul(b.T,x)])
g = SXFunction([V],[veccat(g)])
sol = IpoptSolver(f,g)
sol.init()
sol.setInput(0,"lbg")
sol.setInput(0,"u
|
pedrogideon7/spy_quest
|
parser.py
|
Python
|
mit
| 1,938
| 0.004128
|
#! /usr/bin/env python
class ParserError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, object):
# remember we take ('noun', 'princess') tuples and convert them
self.subject = subject[1]
self.verb = verb[1]
self.object = object[1]
def get_sentence(self):
self.sentence = ' '.join([self.subject, self.verb, self.object])
return self.sentence
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
        return match(word_list, 'verb')
else:
raise ParserError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
next = peek(word_list)
if next == 'noun':
return match(word_list, 'noun')
    elif next == 'direction':
return match(word_list, 'direction')
else:
raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list, subj):
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
def parse_sentence(word_list):
skip(word_list, 'stop')
start = peek(word_list)
if start == 'noun':
subj = match(word_list, 'noun')
return parse_subject(word_list, subj)
elif start == 'verb':
# assume the subject is the player then
return parse_subject(word_list, ('noun', 'player'))
else:
raise ParserError("Must start with subject, object or verb not: %s" % start)
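A minimal usage sketch with a hand-built word list of (type, word) tuples, matching the ('noun', 'princess') convention mentioned in Sentence.__init__:

words = [('verb', 'open'), ('stop', 'the'), ('noun', 'door')]
sentence = parse_sentence(words)
print(sentence.get_sentence())   # -> "player open door"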
|
clemenshage/grslra
|
experiments/6_grslra/system_identification_lti/system_identification.py
|
Python
|
mit
| 2,175
| 0.004138
|
from grslra import testdata
from grslra.grslra_batch import grslra_batch, slra_by_factorization
from grslra.structures import Hankel
from grslra.scaling import Scaling
import numpy as np
import time
# The goal of this experiment is to identify an LTI system from a noisy outlier-contaminated and subsampled observation of its impulse response
PROFILE = 0
if PROFILE:
import cProfile
N = 80
m = 20
k = 5
sigma=0.05
outlier_rate = 0.05
outlier_amplitude = 1
rate_Omega=0.5
N_f = 20
scaling = Scaling(centering=True)
p = 0.1
x, x_0, U, Y = testdata.testdata_lti_outliers(N + N_f, m, k, rho=outlier_rate, amplitude=outlier_amplitude, sigma=sigma)
# determine scaling factor
scaling.scale_reference(x)
mu = (1-p) * (3 * sigma / scaling.factor) ** 2
# draw sampling set
card_Omega = np.int(np.round(rate_Omega * N))
Omega = np.random.choice(N, card_Omega, replace=False)
# create binary support vectors for Omega and Omega_not
entries = np.zeros((N + N_f, ))
entries[Omega] = 1
entries_not = np.ones_like(entries) - entries
# set unobserved entries in x to zero
x *= entries
x_Omega = x[Omega]
n = N + N_f - m + 1
hankel = Hankel(m, n)
grslra_params = {"PRINT": None, "VERBOSE": 1}
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_grslra, U, Y = grslra_batch(x_Omega, hankel, k, p, mu, params=grslra_params, Omega=Omega, x_0=x_0, scaling=scaling)
t_grslra = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("grslra.bin")
print "error GRSLRA: ", np.linalg.norm(l_grslra - x_0) / np.linalg.norm(x_0)
print "time GRSLRA: ", t_grslra
if PROFILE:
profile = cProfile.Profile()
profile.enable()
t_start = time.time()
l_slrabyF = slra_by_factorization(x_Omega, m, k, PRINT=0, x_0=x_0, Omega=Omega, N=N + N_f)
t_slrabyf = time.time() - t_start
if PROFILE:
profile.disable()
profile.dump_stats("slrabyf.bin")
print "error SLRA by F: ", np.linalg.norm(l_slrabyF - x_0) / np.linalg.norm(x_0)
print "time SLRA by F: ", t_slrabyf
np.savez('result_sysid_lti.npz', x_Omega=x_Omega, Omega=Omega, x_0=x_0, t_grslra=t_grslra, l_grslra=l_grslra, t_slrabyf=t_slrabyf, l_slrabyF=l_slrabyF)
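A quick arithmetic check of the Hankel dimensions used above, with the same symbols as in the script:

N, N_f, m = 80, 20, 20
n = N + N_f - m + 1   # number of Hankel columns
assert n == 81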
|
nitin-cherian/LifeLongLearning
|
Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/nbconvert/exporters/latex.py
|
Python
|
mit
| 3,419
| 0.005557
|
"""LaTeX Exporter class"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from traitlets import Unicode, default
from traitlets.config import Config
from nbconvert.filters.highlight import Highlight2Latex
from nbconvert.filters.filter_links import resolve_references
from .templateexporter import TemplateExporter
class LatexExporter(TemplateExporter):
"""
Exports to a Latex template. Inherit from this class if your template is
    LaTeX based and you need custom transformers/filters. Inherit from it if
    you are writing your own HTML template and need custom transformers/filters.
If you don't need custom tranformers/filters, just change the
'template_file' config option. Place your template in the special "/latex"
subfolder of the "../templates" folder.
"""
@default('file_extension')
def _file_extension_default(self):
return '.tex'
@default('template_file')
def _template_file_default(self):
return 'article.tplx'
# Latex constants
@default('default_template_path')
def _default_template_path_default(self):
return os.path.join("..", "templates", "latex")
@default('template_skeleton_path')
def _template_skeleton_path_default(self):
return os.path.join("..", "templates", "latex", "skeleton")
#Extension that the template files use.
template_extension = Unicode(".tplx").tag(config=True)
output_mimetype = 'text/latex'
def default_filters(self):
for x in super(LatexExporter, self).default_filters():
yield x
yield ('resolve_references', resolve_references)
@property
def default_config(self):
c = Config({
'NbConvertBase': {
'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/markdown', 'text/plain']
},
'ExtractOutputPreprocessor': {
'enabled':True
},
'SVG2PDFPreprocessor': {
'enabled':True
},
'LatexPreprocessor': {
'enabled':True
},
'SphinxPreprocessor': {
'enabled':True
},
'HighlightMagicsPreprocessor': {
'enabled':True
}
})
c.merge(super(LatexExporter,self).default_config)
return c
def from_notebook_node(self, nb, resources=None, **kw):
langinfo = nb.metadata.get('language_info', {})
lexer = langinfo.get('pygments_lexer', langinfo.get('name', None))
self.register_filter('highlight_code',
Highlight2Latex(pygments_lexer=lexer, parent=self))
return super(LatexExporter, self).from_notebook_node(nb, resources, **kw)
def _create_environment(self):
environment = super(LatexExporter, self)._create_environment()
# Set special Jinja2 syntax that will not conflict with latex.
environment.block_start_string = "((*"
environment.block_end_string = "*))"
environment.variable_start_string = "((("
environment.variable_end_string = ")))"
environment.comment_start_string = "((="
environment.comment_end_string = "=))"
return environment
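A standalone Jinja2 sketch (outside nbconvert) showing why those delimiters are chosen: a template can emit LaTeX braces and percent signs without clashing with Jinja2's default {{ }} / {% %} syntax:

import jinja2

env = jinja2.Environment(
    block_start_string="((*", block_end_string="*))",
    variable_start_string="(((", variable_end_string=")))",
    comment_start_string="((=", comment_end_string="=))",
)
template = env.from_string(r"((* if title *))\title{((( title )))}((* endif *))")
print(template.render(title="My Notebook"))   # -> \title{My Notebook}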
|
wolverineav/neutron
|
neutron/tests/unit/services/bgp/driver/ryu/test_driver.py
|
Python
|
apache-2.0
| 12,381
| 0.000888
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from ryu.services.protocols.bgp import bgpspeaker
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from neutron.services.bgp.agent import config as bgp_config
from neutron.services.bgp.driver import exceptions as bgp_driver_exc
from neutron.services.bgp.driver.ryu import driver as ryu_driver
from neutron.tests import base
# Test variables for BGP Speaker
FAKE_LOCAL_AS1 = 12345
FAKE_LOCAL_AS2 = 23456
FAKE_ROUTER_ID = '1.1.1.1'
# Test variables for BGP Peer
FAKE_PEER_AS = 45678
FAKE_PEER_IP = '2.2.2.5'
FAKE_AUTH_TYPE = 'md5'
FAKE_PEER_PASSWORD = 'awesome'
# Test variables for Route
FAKE_ROUTE = '2.2.2.0/24'
FAKE_NEXTHOP = '5.5.5.5'
class TestRyuBgpDriver(base.BaseTestCase):
def setUp(self):
super(TestRyuBgpDriver, self).setUp()
cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
cfg.CONF.set_override('bgp_router_id', FAKE_ROUTER_ID, 'BGP')
self.ryu_bgp_driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP)
mock_ryu_speaker_p = mock.patch.object(bgpspeaker, 'BGPSpeaker')
self.mock_ryu_speaker = mock_ryu_speaker_p.start()
def test_add_new_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.mock_ryu_speaker.assert_called_once_with(
as_number=FAKE_LOCAL_AS1, router_id=FAKE_ROUTER_ID,
bgp_server_port=0,
best_path_change_handler=ryu_driver.best_path_change_cb,
peer_down_handler=ryu_driver.bgp_peer_down_cb,
peer_up_handler=ryu_driver.bgp_peer_up_cb)
def test_remove_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertEqual(1, speaker.shutdown.call_count)
def test_add_bgp_peer_without_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=None,
connect_mode=CONNECT_MODE_ACTIVE)
def test_add_bgp_peer_with_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS,
FAKE_AUTH_TYPE,
FAKE_PEER_PASSWORD)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=FAKE_PEER_PASSWORD,
connect_mode=CONNECT_MODE_ACTIVE)
def test_remove_bgp_peer(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_del.assert_called_once_with(address=FAKE_PEER_IP)
def test_advertise_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.advertise_route(FAKE_LOCAL_AS1,
FAKE_ROUTE,
FAKE_NEXTHOP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_add.assert_called_once_with(prefix=FAKE_ROUTE,
next_hop=FAKE_NEXTHOP)
def test_withdraw_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.withdraw_route(FAKE_LOCAL_AS1, FAKE_ROUTE)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_del.assert_called_once_with(prefix=FAKE_ROUTE)
def test_add_same_bgp_speakers_twice(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerAlreadyScheduled,
self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS1)
def test_add_different_bgp_speakers_when_one_already_added(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
def test_add_bgp_speaker_with_invalid_asnum_paramtype(self):
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_speaker, '12345')
def test_add_bgp_speaker_with_invalid_asnum_range(self):
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, 65536)
def test_add_bgp_peer_with_invalid_paramtype(self):
        # Test with an invalid asnum data-type
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, '12345')
# Test with an invalid auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
                          self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'sha-1', 1234)
# Test with an invalid auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'hmac-md5', FAKE_PEER_PASSWORD)
# Test with none auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
|
upconsulting/IsisCB
|
isiscb/curation/actions.py
|
Python
|
mit
| 13,091
| 0.004278
|
"""
Asynchronous functions for bulk changes to the database.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import zip
from builtins import object
from curation.tasks import update_instance, bulk_change_tracking_state, bulk_prepend_record_history, save_creation_to_citation
from django import forms
from django.http import QueryDict
from isisdata.models import *
import isisdata.tasks as dtasks
import curation.taskslib.citation_tasks as ctasks
import curation.taskslib.authority_tasks as atasks
from isisdata.filters import CitationFilter
import json
# TODO: refactor these actions to use bulk apply methods and then explicitly
# trigger search indexing (or whatever other post-save actions are needed).
class BaseAction(object):
def __init__(self):
if hasattr(self, 'default_value_field'):
self.value_field = self.default_value_field
if hasattr(self, 'default_value_field_kwargs'):
self.value_field_kwargs = self.default_value_field_kwargs
if hasattr(self, 'extra'):
self.extra_fields = self.extra
def get_value_field(self, **kwargs):
self.value_field_kwargs.update(kwargs)
return self.value_field(**self.value_field_kwargs)
def get_extra_fields(self, **kwargs):
if hasattr(self, 'extra_fields'):
return [(name, field(**kwargs)) for name, field, kwargs in self.extra_fields]
return []
def _build_filter_label(filter_params_raw):
citation_filter = CitationFilter(QueryDict(filter_params_raw, mutable=True))
filter_form = citation_filter.form
filter_data = {}
if filter_form.is_valid():
filter_data = filter_form.cleaned_data
return ', '.join([ '%s: %s' % (key, value) for key, value in list(filter_data.items()) if value ])
class PrependToRecordHistory(BaseAction):
model = Citation
label = u'Update record history'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Prepend to record history',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = bulk_prepend_record_history.delay(user.id, filter_params_raw,
value, task.id, type)
else:
result = bulk_prepend_record_history.delay(user.id, filter_params_raw,
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_explanation', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class StoreCreationDataToModel(BaseAction):
model = Citation
label = u'Store creation data to citations'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Storing creation data to citations',
'widget': forms.widgets.Textarea(attrs={'class': 'action-value', 'readonly': True, 'initial': 'Storing creation data'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = save_creation_to_citation.delay(user.id, filter_params_raw,
value, task.id, type)
        else:
            result = save_creation_to_citation.delay(user.id, filter_params_raw,
                                         value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('created_native', '')
task.label = 'Storing creator in citation for set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class SetRecordStatus(BaseAction):
model = Citation
label = u'Set record status'
default_value_field = forms.ChoiceField
default_value_field_kwargs = {
'choices': CuratedMixin.STATUS_CHOICES,
'label': 'Set record status',
'widget': forms.widgets.Select(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
# We need this to exist first so that we can keep it up to date as the
# group of tasks is executed.
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_value',
value, task.id, type)
else:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_value',
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_value', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
class SetRecordStatusExplanation(BaseAction):
model = Citation
label = u'Set record status explanation'
default_value_field = forms.CharField
default_value_field_kwargs = {
'label': 'Set record status explanation',
'widget': forms.widgets.TextInput(attrs={'class': 'action-value'}),
}
def apply(self, user, filter_params_raw, value, **extra):
task = AsyncTask.objects.create()
type = extra['object_type'] if extra['object_type'] else None
if type:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_explanation',
value, task.id, type)
else:
result = dtasks.bulk_update_citations.delay(user.id,
filter_params_raw,
'record_status_explanation',
value, task.id)
# We can use the AsyncResult's UUID to access this task later, e.g.
# to check the return value or task state.
task.async_uuid = result.id
task.value = ('record_status_explanation', value)
task.label = 'Updating set with filters: ' + _build_filter_label(filter_params_raw)
task.save()
return task.id
def get_tracking_transition_counts(qs):
states = list(zip(*qs.model.TRACKING_CHOICES))[0]
transitions = dict(list(zip(states, [qs.filter(tracking_state=state).count() for state in states])))
# bugfix for Zotero imports: tracking_state is None not "NO"
transitions[qs.model.NONE] += qs.filter(tracking_state=None).count()
return transitions
def get_allowable_transition_states():
from curation.tracking import TrackingWorkflow
return dict([(target, source) for source, target in TrackingWorkflow.transitions])
def get_transition_labels():
from curation.tracking import TrackingWorkflow
return dict(Tracking.TYPE_CHOICES)
class SetTrackingStatus(BaseAction):
model = Citation
label = u'Set record tracking status'
default_value_field = forms.ChoiceField
default_value_field_kwargs = {
'choices': Tracking.
|
jkandasa/integration_tests
|
fixtures/parallelizer/__init__.py
|
Python
|
gpl-2.0
| 27,463
| 0.002185
|
"""Parallel testing, supporting arbitrary collection ordering
The Workflow
------------
- Master py.test process starts up, inspects config to decide how many slaves to start, if at all
- env['parallel_base_urls'] is inspected first
- py.test config.option.appliances and the related --appliance cmdline flag are used
if env['parallel_base_urls'] isn't set
- if neither are set, no parallelization happens
- Slaves are started
- Master runs collection, blocks until slaves report their collections
- Slaves each run collection and submit them to the master, then block inside their runtest loop,
waiting for tests to run
- Master diffs slave collections against its own; the test ids are verified to match
across all nodes
- Master enters main runtest loop, uses a generator to build lists of test groups which are then
sent to slaves, one group at a time
- For each phase of each test, the slave serializes test reports, which are then unserialized on
the master and handed to the normal pytest reporting hooks, which is able to deal with test
reports arriving out of order
- Before running the last test in a group, the slave will request more tests from the master
- If more tests are received, they are run
- If no tests are received, the slave will shut down after running its final test
- After all slaves are shut down, the master will do its end-of-session reporting as usual, and
shut down
"""
from itertools import groupby
import difflib
import json
import os
import signal
import subprocess
from collections import defaultdict, deque, namedtuple
from datetime import datetime
from itertools import count
import attr
from threading import Thread
from time import sleep, time
import pytest
import zmq
from _pytest import runner
from fixtures import terminalreporter
from fixtures.parallelizer import remote
from fixtures.pytest_store import store
from cfme.utils import at_exit, conf
from cfme.utils.log import create_sublogger
from cfme.utils.path import conf_path
# Initialize slaveid to None, indicating this as the master process
# slaves will set this to a unique string when they're initialized
conf.runtime['env']['slaveid'] = None
if not conf.runtime['env'].get('ts'):
ts = str(time())
conf.runtime['env']['ts'] = ts
def pytest_addhooks(pluginmanager):
import hooks
pluginmanager.add_hookspecs(hooks)
@pytest.mark.trylast
def pytest_configure(config):
"""Configures the parallel session, then fires pytest_parallel_configured."""
reporter = terminalreporter.reporter()
holder = config.pluginmanager.get_plugin("appliance-holder")
appliances = holder.appliances
if len(appliances) > 1:
session = ParallelSession(config, appliances)
config.pluginmanager.register(session, "parallel_session")
store.parallelizer_role = 'master'
reporter.write_line(
'As a parallelizer master kicking off parallel session for these {} appliances'.format(
len(appliances)),
green=True)
config.hook.pytest_parallel_configured(parallel_session=session)
else:
reporter.write_line('No parallelization required', green=True)
config.hook.pytest_parallel_configured(parallel_session=None)
def handle_end_session(signal, frame):
# when signaled, end the current test session immediately
if store.parallel_session:
store.parallel_session.session_finished = True
signal.signal(signal.SIGQUIT, handle_end_session)
@attr.s(hash=False)
class SlaveDetail(object):
slaveid_generator = ('slave{:02d}'.format(i) for i in count())
appliance = attr.ib()
id = attr.ib(default=attr.Factory(
lambda: next(SlaveDetail.slaveid_generator)))
forbid_restart = attr.ib(default=False, init=False)
tests = attr.ib(default=attr.Factory(set), repr=False)
process = attr.ib(default=None, repr=False)
provider_allocation = attr.ib(default=attr.Factory(list), repr=False)
def start(self):
if self.forbid_restart:
return
devnull = open(os.devnull, 'w')
# worker output redirected to null; useful info comes via messages and logs
self.process = subprocess.Popen(
['python', remote.__file__, self.id, self.appliance.as_json, conf.runtime['env']['ts']],
stdout=devnull,
)
at_exit(self.process.kill)
def poll(self):
if self.process is not None:
return self.process.poll()
class ParallelSession(object):
def __init__(self, config, appliances):
self.config = config
self.session = None
self.session_finished = False
self.countfailures = 0
self.collection = []
self.sent_tests = 0
self.log = create_sublogger('master')
self.maxfail = config.getvalue("maxfail")
self._failed_collection_errors = {}
self.terminal = store.terminalreporter
self.trdist = None
self.slaves = {}
self.test_groups = self._test_item_generator()
self._pool = []
from cfme.utils.conf import cfme_data
self.provs = sorted(set(cfme_data['management_systems'].keys()),
key=len, reverse=True)
self.used_prov = set()
self.failed_slave_test_groups = deque()
self.slave_spawn_count = 0
self.appliances = appliances
# set up the ipc socket
zmq_endpoint = 'ipc://{}'.format(
config.cache.makedir('parallelize').join(str(os.getpid())))
ctx = zmq.Context.instance()
self.sock = ctx.socket(zmq.ROUTER)
self.sock.bind(zmq_endpoint)
# clean out old slave config if it exists
slave_config = conf_path.join('slave_config.yaml')
slave_config.check() and slave_config.remove()
# write out the slave config
conf.runtime['slave_config'] = {
'args': self.config.args,
'options': dict( # copy to avoid aliasing
self.config.option.__dict__,
use_sprout=False, # Slaves don't use sprout
),
'zmq_endpoint': zmq_endpoint,
}
if hasattr(self, "slave_appliances_data"):
conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
conf.save('slave_config')
for appliance in self.appliances:
slave_data = SlaveDetail(appliance=appliance)
self.slaves[slave_data.id] = slave_data
for slave in sorted(self.slaves):
self.print_message("using appliance {}".format(self.slaves[slave].appliance.url),
slave, green=True)
def _slave_audit(self):
# XXX: There is currently no mechanism to add or remove slave_urls, short of
# firing up the debugger and doing it manually. This is making room for
# planned future abilities to dynamically add and remove slaves via automation
# check for unexpected slave shutdowns and redistribute tests
for slave in self.slaves.values():
returncode = slave.poll()
if returncode:
slave.process = None
if returncode == -9:
|
msg = '{} killed due to error, respawning'.format(slave.id)
else:
msg = '{} terminated unexpectedly with status {}, respawning'.format(
slave.id, returncode)
if slave.tests:
failed_tests, slave.tests = slave.tests, set()
                    num_failed_tests = len(failed_tests)
self.sent_tests -= num_failed_tests
msg += ' and redistributing {} tests'.format(num_failed_tests)
self.failed_slave_test_groups.append(failed_tests)
self.print_message(msg, purple=True)
# If a slave was terminated for any reason, kill that slave
# the terminated flag implies the appliance has died :(
for slave in list(self.slaves.values()):
if slave.forbid_restart:
if slave.process is None:
self.config.hook.pytest_miq_node_shutdo
|
deapplegate/wtgpipeline
|
quality_studies_psf.py
|
Python
|
mit
| 15,321
| 0.024672
|
import numpy, sys, os, pylab, astropy, astropy.io.fits as pyfits, ldac, math
def open_and_get_shearcat(filename, tablename):
#
# for opening and retrieving shear cat.
#
return ldac.openObjectFile(filename, tablename)
#class ello
def avg_shear(g1array, g2array):
avg1 = numpy.mean(g1array)
avg2 = numpy.mean(g2array)
# leave open possibility of weighted average
return [avg1,avg2]
def avg_shear_aniscorr(g1array, g2array, epol1, epol2):
# g1 array : numpy array of g1
# g2 array : numpy array of g2
# e1po1 array: array of e1 correction at gal position
# e2pol array: array of e2 correction at gal position
# Average shear in bins of the ell correction
# this may be defunct.
# get indices of sorted (by epol) arrays
indx = numpy.lexsort((g1array,epol1))
indy = numpy.lexsort((g2array,epol2))
sortedx = []
sortedy = []
binsy =[0] # first bin 0
binsx =[0] #
binwidth = len(g1array) / 10 # 10 bins
for j in range(1,10):
        binsx.append(epol1[indx[j*binwidth]]+0.00001)
        binsy.append(epol2[indy[j*binwidth]]+0.00001)
    binsx.append(epol1[indx[-1]]+0.00001)
    binsy.append(epol2[indy[-1]]+0.00001)
for i in range(len(g1array)):
sortedx.append([g1array[indx[i]],epol1[indx[i]]])
        sortedy.append([g2array[indy[i]],epol2[indy[i]]])
xarr = numpy.array(sortedx)
yarr = numpy.array(sortedy)
xavgs = []
yavgs = []
for j in range(10):
xavgs.append(numpy.average(xarr[binsx[j]:binsx[j+1],0]))
yavgs.append(numpy.average(yarr[binsy[j]:binsy[j+1],0]))
return xavgs, binsx, yavgs, binsy
# lets make 10 bins
def avg_epol_gamma(g1array, g2array, epol1, epol2):
# g1 array : numpy array of g1
# g2 array : numpy array of g2
# e1po1 array: array of e1 correction at gal position
# e2pol array: array of e2 correction at gal position
avg1 = numpy.mean(g1array*epol1)
err1 = numpy.std(g1array*epol1)/math.sqrt(len(epol1)*1.0)
err1bs = do_bootstrap_error(g1array*epol1)
avg2 = numpy.mean(g2array*epol2)
err2 = numpy.std(g2array*epol2)/math.sqrt(len(epol2)*1.0)
err2bs = do_bootstrap_error(g2array*epol2)
# print avg1,avg2,err1,err2
return avg1,avg2,err1,err2,err1bs, err2bs
def star_gal_correlation(galarray, stararray):
# galarray: array with galaxy positions and
# shears values.
# stararray: array with star positions and
# ell values values.
gal_g1arr = galarray['g1']
gal_g2arr = galarray['g2']
gal_xarr = galarray['x']
gal_yarr = galarray['y']
star_xarr = stararray['x']
star_yarr = stararray['y']
star_e1pol = stararray['e1pol']
star_e2pol = stararray['e2pol']
star_e1 = stararray['e1']
star_e2 = stararray['e2']
# create full arrays for correlations
# star arrays : 1111... 22222... 3333...
# gal arrays : 1234... 12345... 1234
#
starlen=len(stararray['e1'])
gallen=len(galarray['g1'])
gal_g1corr = make_gal_corrarray(galarray['g1'],starlen)
gal_g2corr = make_gal_corrarray(galarray['g2'],starlen)
gal_xcorr = make_gal_corrarray(galarray['x'],starlen)
gal_ycorr = make_gal_corrarray(galarray['y'],starlen)
star_e1corr = make_star_corrarray(stararray['e1'],gallen)
star_e2corr = make_star_corrarray(stararray['e2'],gallen)
star_xcorr = make_star_corrarray(stararray['x'],gallen)
star_ycorr = make_star_corrarray(stararray['y'],gallen)
distcorr = numpy.sqrt((star_xcorr-gal_xcorr)*(star_xcorr-gal_xcorr)+ \
(star_ycorr-gal_ycorr)*(star_ycorr-gal_ycorr))
xi_pp = gal_g1corr*star_e1corr + gal_g2corr*star_e2corr
#star autocorrelation
emagarray=numpy.sqrt(stararray['e1']*stararray['e1']+stararray['e2']*stararray['e2'])
emagautocorr=numpy.zeros((starlen*(starlen-1))/2)
edistautocorr=numpy.zeros((starlen*(starlen-1))/2)
iterator=0
# I'm sure there's a better way to do this.
for i in range(len(emagarray)):
for j in range(i+1,len(emagarray)):
emagautocorr[iterator]=emagarray[i]*emagarray[j]
edistautocorr[iterator]=math.sqrt(((stararray['x'][i]-stararray['x'][j])*\
(stararray['x'][i]-stararray['x'][j]))+\
((stararray['y'][i]-stararray['y'][j])*\
(stararray['y'][i]-stararray['y'][j])))
iterator=iterator + 1
return xi_pp, distcorr, emagautocorr, edistautocorr
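# Vectorized alternative to the O(n^2) star autocorrelation loop above. This
# is an illustrative sketch only (it is not called anywhere in this script);
# it assumes the same emag/x/y arrays that star_gal_correlation builds.
def star_autocorrelation_vectorized(emag, x, y):
    # indices of every unique unordered pair i < j
    i, j = numpy.triu_indices(len(emag), k=1)
    emagautocorr = emag[i] * emag[j]
    edistautocorr = numpy.sqrt((x[i] - x[j])**2 + (y[i] - y[j])**2)
    return emagautocorr, edistautocorr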
def make_gal_corrarray( objarray, n ):
m = len(objarray)
a = numpy.array(numpy.zeros((n*m)))
for k in range(n):
a[k*m:(k+1)*m]=objarray
return a
def make_star_corrarray( objarray, n ):
m = len(objarray)
a = numpy.array(numpy.zeros((n*m)))
for k in range(m):
a[k*n:(k+1)*n]=objarray[k]
return a
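# Note (illustrative): the two helpers above reproduce numpy built-ins --
#   make_gal_corrarray(g, n)  is equivalent to numpy.tile(g, n)     # 1234 1234 ...
#   make_star_corrarray(e, n) is equivalent to numpy.repeat(e, n)   # 1111 2222 ...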
def do_bootstrap_error(inputarray, nbootstraps=100):
n = len(inputarray)
npars=inputarray[numpy.random.random_integers(0,n-1,(n,nbootstraps))]
meanlist = numpy.mean(npars,0)
if len(meanlist) != nbootstraps:
print 'averaging across wrong axis'
return numpy.std(meanlist)
#
# switch to polar coordinates
#
def cartesianToPolar(x, y):
r = numpy.sqrt(x**2+y**2)
phi = numpy.arccos(x/r)
    phi2 = 2. * numpy.pi - phi
phi_yp = y>=0.
phi2_yp = y<0.
phi = phi* phi_yp +phi2* phi2_yp
return r, phi
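# Quick sanity check (illustrative): cartesianToPolar(0., -1.) gives r = 1 and
# phi = 3*pi/2, because the y < 0 branch maps the angle into (pi, 2*pi).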
#
# make the plots
#
def make_scatter_inputs(yvals, xvals,therange, nbins=10):
if len(yvals) != len(xvals):
        print len(yvals), ' does not equal ', len(xvals)
vals, thebins = pylab.histogram(xvals, weights=yvals, bins=nbins,range=therange)
vals_sq, thebins = pylab.histogram(xvals, weights=yvals*yvals, bins=nbins ,range=therange)
vals_n, thebins = pylab.histogram(xvals, bins=nbins,range=therange)
val_errs = numpy.sqrt((vals_sq/vals_n) - (vals/vals_n)*(vals/vals_n))/numpy.sqrt(vals_n)
bincenters=[]
binerrs=[]
# print 'The Bins = ', thebins
for k in range(len(thebins)-1):
bincenters.append((thebins[k]+thebins[k+1])/2.)
binerrs.append((thebins[k+1]-thebins[k])/2.)
# print 'bincenters = ',bincenters
return bincenters, vals/vals_n, binerrs, val_errs
def get_percentiles(arr):
# return 10 and 90 %iles
sorted = numpy.sort(arr)
n = len(sorted)
val = n/10
return sorted[val],sorted[n-val]
if __name__ == "__main__":
filename_gal = sys.argv[1]
filename_star = sys.argv[2]
if len(sys.argv)==3:
outfilename = 'psfplots.png'
elif len(sys.argv)==4:
outfilename = sys.argv[3]
else:
print 'usage: ./quality_studies_psf.py [galaxy_shear.cat] [star.cat] [output=psfplots.png]'
sys.exit(1)
galcat = open_and_get_shearcat(filename_gal,'OBJECTS')
starcat = open_and_get_shearcat(filename_star,'OBJECTS')
if galcat:
print ' got Galaxy cat'
if starcat:
print ' got Star cat'
maxrg=numpy.max(starcat['rg'])
galcat = galcat.filter(galcat['rg']>maxrg)
galcat = galcat.filter(galcat['Flag']==0)
gal_g1arr = numpy.array(galcat['gs1'])
gal_g2arr = numpy.array(galcat['gs2'])
gal_xarr = numpy.array(galcat['x'])
gal_yarr = numpy.array(galcat['y'])
gal_e1corr = numpy.array(galcat['e1corrpol'])
gal_e2corr = numpy.array(galcat['e2corrpol'])
    star_xarr = numpy.array(starcat['x'])
star_yarr = numpy.array(starcat['y'])
star_e1corr = numpy.array(starcat['e1corrpol'])
    star_e2corr = numpy.array(starcat['e2corrpol'])
star_e1 = numpy.array(starcat['e1'])
star_e2 = numpy.array(starcat['e2'])
pylab.rc('text', usetex=True)
pylab.figure(figsize=(15,10) ,facecolor='w')
pylab.subplots_adjust(wspace=0.3,hspace=0.3)
pylab.subplot(231,axisbg='w')
pylab.cool()
# Qualtest 1 : Average shear:
avg_gs1 = numpy.mean(gal_g1arr)
    err_gs1 = numpy.std(gal_g1arr)/math.sqrt(len(gal_g1arr)*1.0)
err_gs1bs =
|
floodlight/ivs
|
build/oftest.py
|
Python
|
epl-1.0
| 13,000
| 0.006692
|
#!/usr/bin/python
################################################################
#
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
#
################################################################
_help="""
-------------------------------------------------------------------------------
NAME
oftest.py - Run OFTest against IVS
SYNOPSIS
oftest.py [--ivs-args=...] [--oft-args=...] [--test-file|-f=...] [--test-spec|-t=...]
DESCRIPTION
This script automates the execution of OFTest suites against
the IVS binary. You can use it to execute any subset of
tests against your locally built IVS binary.
This script is used in the automated testing tasks.
This script can be used by developers for manual tests.
OPTIONS
--test-spec, -T The oftest test-spec you want to execute.
This parameter is required. If you want to run
all tests, specify "all".
--test-file, -f Path to an OFTest test-file.
--log-base-dir Set Log base directory.
NOTES
You must set the following environment variables before
using this script:
$OFTEST Set to the top of the OFTest repository.
LOGFILES
    The output from IVS is stored in 'testlogs/OFTest/{testname}/ivs.log'.
    The output from oftest is stored in 'testlogs/OFTest/{testname}/output.log'.
    The oft.log file is stored in 'testlogs/OFTest/{testname}/oft.log'.
EXAMPLES
# Run all oftests against IVS:
> build/oftest.py -T all
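    # Run only the tests listed in an OFTest test-file (path is hypothetical):
    > build/oftest.py -T all -f mytests.tf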
"""
import os
import sys
import time
import argparse
import random
import subprocess
import pprint
import platform
import datetime
import StringIO
import signal
import select
import platform
import logging
import re
# Prevent ratelimiters from causing test failures
os.environ['INDIGO_BENCHMARK'] = '1'
###############################################################################
#
# Helpers
#
###############################################################################
def dirlist(d):
if d == None:
return [ "." ]
if type(d) == str:
return [ d ]
if type(d) != list:
raise Exception("'%s' is a bad dirlist" % d)
return d
def fselect(name, tops, subs, p=False):
tops = dirlist(tops)
subs = dirlist(subs)
for top in tops:
for sub in subs:
f = "%s/%s/%s" % (top, sub, name)
if os.path.exists(f):
return f
            if p:
print "%s: not found" % f
if p == False:
fselect(name, tops, subs, p=True)
raise Exception("Could not find the '%s' binary. Search paths were %s:%s" % (name, tops, subs))
def system(command, die=False):
logging.debug("Running %s ", command)
rv = os.system(command)
if rv != 0 and die:
raise Exception(" [ %s ] FAILED: %d" % (command, rv))
return rv
def randomports(count):
return random.sample(xrange(30000, 32000), count)
def requirePathEnv(name):
p = os.getenv(name)
if p is None:
raise Exception("You must set the $%s variable." % name)
if not os.path.isdir(p):
raise Exception("The $%s variable does not point to a directory." % name)
return p
###############################################################################
IVS_BASEDIR = os.path.join(os.path.dirname(__file__), "..")
OFTEST_BASEDIR = requirePathEnv("OFTEST")
LOG_BASEDIR = "%s/testlogs/oftest" % (IVS_BASEDIR)
OFT = fselect("oft", OFTEST_BASEDIR, ".")
IVS_BINARY = fselect("ivs", IVS_BASEDIR, ["targets/ivs/build/gcc-local/bin"]);
if sys.stderr.isatty():
RED = "\x1B[31m"
GREEN = "\x1B[32m"
NORM = "\x1B[39m"
else:
RED = ""
GREEN = ""
NORM = ""
class VethNetworkConfig(object):
def __init__(self, portCount):
self.caddr = "127.0.0.1"
self.cport = randomports(1)[0]
self.switchInterfaces = ["veth%d" % (i*2) for i in range(portCount)]
self.oftestInterfaces = ["%d@veth%d" % (i+1, i*2+1) for i in range(portCount)]
def listOFTests(spec=None, testfile=None, openflowVersion=None, testDir=None):
args = [ OFT, "--list-test-names" ]
if spec:
args.append(spec)
if testfile:
args.append("--test-file=%s" % testfile)
if openflowVersion:
args.append("-V%s" % openflowVersion)
if testDir:
args.append("--test-dir=%s" % testDir)
stdout = subprocess.check_output(args);
return stdout.splitlines();
def runOFTest(test, networkConfig, logDir, openflowVersion, testDir=None, oftArgs=None):
args = [ OFT,
"-H", str(networkConfig.caddr),
"-p", str(networkConfig.cport),
"--verbose",
"--log-file", "%s/oft.log" % logDir,
"--fail-skipped" ]
args.append("-V%s" % openflowVersion)
for iface in networkConfig.oftestInterfaces:
args.append('-i')
args.append(iface)
if testDir:
args.append("--test-dir=%s" % testDir)
if oftArgs:
args = args + oftArgs
args.append(test)
with open("%s/oft.stdout.log" % (logDir), "w") as logfile:
child = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=logfile,
stderr=subprocess.STDOUT)
if not child:
raise Exception("Failed to start: ", args)
child.wait()
return child.returncode;
class IVS(object):
def __init__(self, networkConfig, logDir, openflowVersion, ivsArgs=None):
self.networkConfig = networkConfig
self.logDir = logDir
self.openflowVersion = openflowVersion
self.ivsArgs = ivsArgs
self.child = None
def start(self):
args = [ IVS_BINARY,
"-c", "%s:%d" % (self.networkConfig.caddr, self.networkConfig.cport) ]
args.append("-V%s" % self.openflowVersion)
if self.ivsArgs:
args += self.ivsArgs
for iface in self.networkConfig.switchInterfaces:
args.append("-i");
args.append(iface);
with open("%s/ivs.log" % (self.logDir), "w") as logfile:
self.child = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=logfile,
stderr=subprocess.STDOUT)
if self.child is None:
raise Exception("Failed to start IVS")
def stop(self):
if self.child:
self.child.send_signal(signal.SIGTERM)
self.child.wait()
self.child = None
# BSN test system integration
class AbatTask(object):
def __init__(self):
self.abatId = os.getenv("ABAT_ID");
assert(self.abatId)
self.abatTimestamp = os.getenv("ABAT_TIMESTAMP")
self.abatTask = os.getenv("ABAT_TASK")
self.abatWorkspace = "%s-%s" % (self.abatTimestamp, self.abatTask)
self.bscBaseDir = requirePathEnv("BSC");
self.runIds = {}
def addTestcase(self, test, testLogDir):
logUrl = "http://%s/abat/%s/%s" % (platform.node(), self.abatWorkspace, testLogDir)
runId = os.popen("%s/build/add-testcase.py %s %s %s %s | tail -n 1" % (
self.bscBaseDir, self.abatId, test, "OFTest", logUrl)).read().rstrip()
self.runIds[test] = runId
def updateTestcase(self, test, result):
system("%s/build/update-testcase.py %s %s" % (
self.bscBaseDir, self.runIds[test], result))
class AutotestIVS(object):
def __init__(self, config):
self.confi
|
JBonsink/GSOC-2013
|
tools/ns-allinone-3.14.1/ns-3.14.1/src/config-store/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-3.0
| 54,535
| 0.013588
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
|
import sys
def module_init():
root_module = Module('ns.config_store', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::FileConfig [class]
|
module.add_class('FileConfig', allow_subclassing=True)
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore [class]
module.add_class('GtkConfigStore')
## file-config.h (module 'config-store'): ns3::NoneFileConfig [class]
module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## config-store.h (module 'config-store'): ns3::ConfigStore [class]
module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase'])
## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration]
module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore'])
## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration]
module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig'])
register_Ns3GtkConfigStore_methods(root_module, root_module['ns3::GtkConfigStore'])
register_Ns3NoneFileConfig_methods(r
|
gcushen/mezzanine-api
|
tests/test_post.py
|
Python
|
mit
| 10,996
| 0.003365
|
from __future__ import unicode_literals, print_function
from django.urls import reverse
from rest_framework import status
from mezzanine.blog.models import BlogPost as Post
from tests.utils import TestCase
class TestPostViewSet(TestCase):
"""
Test the API resources for blog posts (read and write)
"""
def setUp(self):
"""
Setup the tests
Create some published and draft blog posts for API retrieval testing
"""
super(TestPostViewSet, self).setUp()
# Note for using status:
# from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
# status=CONTENT_STATUS_PUBLISHED
self.post_draft = Post.objects.create(
title="Draft Post Title",
content="Draft Content",
status=1,
user=self.user)
self.post_published = Post.objects.create(
title="Published Post Title",
content="Published Content",
publish_date='2016-01-01T00:00Z',
user=self.user)
def tearDown(self):
"""
Clean up after the tests
"""
        super(TestPostViewSet, self).tearDown()
self.post_draft.delete()
self.post_published.delete()
def test_list_published_posts(self):
"""
Test API list all published blog posts
"""
url = reverse('blogpost-list')
response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response['Content-type'], 'application/json')
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'][0]['title'], self.post_published.title)
def test_retrieve_published_post(self):
"""
Test API retrieve the published blog post that we created earlier
"""
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['title'], self.post_published.title)
def test_retrieve_draft_post(self):
"""
Test that retrieving a draft post fails since the API only allows read access to published posts
"""
url = '/api/posts/{}'.format(self.post_draft.pk)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_create_as_superuser_token(self):
"""
Test API POST CREATE whilst authenticated via OAuth2 as a superuser
"""
# Note: we do not directly provide user here, as API should automatically get and
# authenticate current user as the author
post_data = {'title': 'title1', 'content': 'content1', 'publish_date': '2016-01-01T00:00Z',
'categories': 'Machine Learning,Statistics'}
url = '/api/posts'
response = self.client.post(url, post_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Post.objects.get(pk=response.data['id']).user, self.superuser)
self.assertEqual(Post.objects.get(pk=response.data['id']).title, post_data['title'])
self.assertEqual(Post.objects.get(pk=response.data['id']).content, post_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=response.data['id'])),
post_data['categories'])
def test_create_as_superuser(self):
"""
Test API POST CREATE whilst authenticated as a superuser
"""
post_data = {'title': 'title2', 'content': 'content2', 'publish_date': '2016-01-01T00:00Z',
'categories': 'Machine Learning'}
url = '/api/posts'
self.client.force_authenticate(user=self.superuser)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Post.objects.get(pk=response.data['id']).user, self.superuser)
self.assertEqual(Post.objects.get(pk=response.data['id']).title, post_data['title'])
self.assertEqual(Post.objects.get(pk=response.data['id']).content, post_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=response.data['id'])),
post_data['categories'])
def test_create_as_user(self):
"""
Test API POST CREATE whilst authenticated as a standard user
"""
post_data = {'title': 'a', 'content': 'b', 'publish_date': '2016-01-01T00:00Z'}
url = '/api/posts'
self.client.force_authenticate(user=self.user)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_as_guest(self):
"""
Test API POST CREATE whilst unauthenticated as a guest
"""
post_data = {'title': 'a', 'content': 'b', 'publish_date': '2016-01-01T00:00Z'}
url = '/api/posts'
self.client.force_authenticate(user=None)
response = self.client.post(url, post_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_as_superuser_token(self):
"""
Test API PUT UPDATE whilst authenticated via OAuth2 as a superuser
"""
put_data = {'title': 'a', 'content': 'b', 'categories': 'cat1,cat2'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, put_data['title'])
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, put_data['content'])
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
put_data['categories'])
def test_update_as_user(self):
"""
Test API PUT UPDATE whilst authenticated as a standard user
"""
put_data = {'title': 'a', 'content': 'b'}
url = '/api/posts/{}'.format(self.post_published.pk)
self.client.force_authenticate(user=self.user)
response = self.client.put(url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_update_as_guest(self):
"""
Test API PUT UPDATE whilst unauthenticated as a guest
"""
put_data = {'title': 'a', 'content': 'b'}
url = '/api/posts/{}'.format(self.post_published.pk)
self.client.force_authenticate(user=None)
response = self.client.put(url, put_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update_categories_unchanged(self):
"""
Test API PUT UPDATE title and test categories remain unchanged
whilst authenticated via OAuth2 as a superuser
"""
original_content = Post.objects.get(pk=self.post_published.pk).content
original_categories = self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk))
put_data = {'title': 'updated title'}
url = '/api/posts/{}'.format(self.post_published.pk)
response = self.client.put(url, put_data, format='json', HTTP_AUTHORIZATION=self.auth_valid)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Post.objects.get(pk=self.post_published.pk).title, put_data['title'])
self.assertEqual(Post.objects.get(pk=self.post_published.pk).content, original_content)
self.assertEqual(self.get_categories_as_delim_str(Post.objects.get(pk=self.post_published.pk)),
original_categories)
def test_update_categories_disassociate_one(self):
"""
Test API PUT UPDATE disassociate category 'cat2'
|
tomjelinek/pcs
|
pcs/cli/routing/resource_stonith_common.py
|
Python
|
gpl-2.0
| 2,770
| 0
|
from typing import (
Any,
List,
)
from pcs import resource
from pcs.cli.common.parse_args import InputModifiers
from pcs.cli.common.routing import (
CliCmdInterface,
create_router,
)
def resource_defaults_cmd(parent_cmd: List[str]) -> CliCmdInterface:
    def _get_router(
lib: Any, argv: List[str], modifiers: InputModifiers
) -> None:
"""
Options:
* -f - CIB file
* --force - allow unknown options
"""
if argv and "=" in argv[0]:
# DEPRECATED legacy command
return resource.resource_defaults_legacy_cmd(
                lib, argv, modifiers, deprecated_syntax_used=True
)
router = create_router(
{
"config": resource.resource_defaults_config_cmd,
"set": create_router(
{
"create": resource.resource_defaults_set_create_cmd,
"delete": resource.resource_defaults_set_remove_cmd,
"remove": resource.resource_defaults_set_remove_cmd,
"update": resource.resource_defaults_set_update_cmd,
},
parent_cmd + ["set"],
),
"update": resource.resource_defaults_legacy_cmd,
},
parent_cmd,
default_cmd="config",
)
return router(lib, argv, modifiers)
return _get_router
def resource_op_defaults_cmd(parent_cmd: List[str]) -> CliCmdInterface:
def _get_router(
lib: Any, argv: List[str], modifiers: InputModifiers
) -> None:
"""
Options:
* -f - CIB file
* --force - allow unknown options
"""
if argv and "=" in argv[0]:
# DEPRECATED legacy command
return resource.resource_op_defaults_legacy_cmd(
lib, argv, modifiers, deprecated_syntax_used=True
)
router = create_router(
{
"config": resource.resource_op_defaults_config_cmd,
"set": create_router(
{
"create": resource.resource_op_defaults_set_create_cmd,
"delete": resource.resource_op_defaults_set_remove_cmd,
"remove": resource.resource_op_defaults_set_remove_cmd,
"update": resource.resource_op_defaults_set_update_cmd,
},
parent_cmd + ["set"],
),
"update": resource.resource_op_defaults_legacy_cmd,
},
parent_cmd,
default_cmd="config",
)
return router(lib, argv, modifiers)
return _get_router
|
decebel/dataAtom_alpha
|
bin/plug/py/external/pattern/text/en/__init__.py
|
Python
|
apache-2.0
| 3,292
| 0.008202
|
#### PATTERN | EN ##################################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# English linguistical tools using fast regular expressions.
from inflect import \
article, referenced, DEFINITE, INDEFINITE, \
pluralize, singularize, NOUN, VERB, ADJECTIVE, \
conjugate, lemma, lexeme, tenses, VERBS, \
grade, comparative, superlative, COMPARATIVE, SUPERLATIVE, \
predicative, attributive, \
INFINITIVE, PRESENT, PAST, FUTURE, \
FIRST, SECOND, THIRD, \
SINGULAR, PLURAL, SG, PL, \
PROGRESSIVE, \
PARTICIPLE
from inflect.quantify import \
number, numerals, quantify, reflect
from inflect.spelling import \
suggest as spelling
from parser import tokenize, parse, tag
from parser.tree import Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table
from parser.tree import SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR
from parser.modality import mood, INDICATIVE, IMPERATIVE, CONDITIONAL, SUBJUNCTIVE
from parser.modality import modality, EPISTEMIC
from parser.modality import negated
from parser.sentiment import sentiment, polarity, subjectivity, positive
from parser.sentiment import NOUN, VERB, ADJECTIVE, ADVERB
import wordnet
import wordlist
def parsetree(s, *args, **kwargs):
""" Returns a parsed Text from the given string.
"""
return Text(parse(s, *args, **kwargs))
def split(s, token=[WORD, POS, CHUNK, PNP]):
""" Returns a parsed Text from the given parsed string.
"""
return Text(s, token)
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
""" Pretty-prints the output of parse() as a table with outlined columns.
Alternatively, you can supply a Text or Sentence object.
"""
if isinstance(string, basestring):
print "\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)])
if isinstance(string, Text):
print "\n\n".join([table(sentence, fill=column) for sentence in string])
if isinstance(string, Sentence):
        print table(string, fill=column)
def ngrams(string, n=3, continuous=False):
""" Returns a list of n-grams (tuples of n successive words) from the given string.
Alternatively, you can supply a Text or Sentence object.
With continuous=False, n-grams will not run over sentence markers (i.e., .!?).
"""
def strip_period(s, punctuation=set(".:;,!?()[]'\"")):
        return [w for w in s if (isinstance(w, Word) and w.string or w) not in punctuation]
if n <= 0:
return []
if isinstance(string, basestring):
s = [strip_period(s.split(" ")) for s in tokenize(string)]
if isinstance(string, Sentence):
s = [strip_period(string)]
if isinstance(string, Text):
s = [strip_period(s) for s in string]
if continuous:
s = [sum(s, [])]
g = []
for s in s:
#s = [None] + s + [None]
g.extend([tuple(s[i:i+n]) for i in range(len(s)-n+1)])
return g
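# Usage sketch (hypothetical input; assumes the default tokenizer splits off
# sentence-final punctuation, which strip_period() then discards):
#   >>> ngrams("The cat sat. The dog ran.", n=2)
#   [('The', 'cat'), ('cat', 'sat'), ('The', 'dog'), ('dog', 'ran')]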
|
beepee14/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
Python
|
bsd-3-clause
| 3,265
| 0
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
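# For reference (illustrative, not part of the original example): a single
# forest's OOB error is simply the complement of its ``oob_score_`` attribute,
#   clf = RandomForestClassifier(n_estimators=100, oob_score=True).fit(X, y)
#   oob_error = 1 - clf.oob_score_
# and the loop below tracks exactly that quantity as trees are added.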
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
|
trolldbois/python-haystack-reverse
|
test/haystack/reverse/test_pointerfinder.py
|
Python
|
gpl-3.0
| 12,279
| 0.003339
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import timeit
import unittest
from haystack.mappings import folder
from haystack.mappings.base import AMemoryMapping
from haystack.mappings.base import MemoryHandler
from haystack.mappings.file import LocalMemoryMapping
import haystack.reverse.enumerators
import haystack.reverse.matchers
from haystack.reverse import searchers
from test.testfiles import zeus_856_svchost_exe
from . import test_pattern
log = logging.getLogger('test_pointerfinder')
class TestPointer(test_pattern.SignatureTests):
def setUp(self):
super(TestPointer, self).setUp()
self.mmap, self.values = self._make_mmap_with_values(self.seq)
self.name = 'test_dump_1'
self.feedback = searchers.NoFeedback()
def _make_mmap_with_values(self, intervals, struct_offset=None):
"""
Make a memory map, with a fake structure of pointer pattern inside.
Return the pattern signature
:param intervals:
:param struct_offset:
:return:
"""
# template of a memory map metadata
self._mstart = 0x0c00000
self._mlength = 4096 # end at (0x0c01000)
# could be 8, it doesn't really matter
self.word_size = self.target.get_word_size()
if struct_offset is not None:
self._struct_offset = struct_offset
else:
self._struct_offset = self.word_size*12 # 12, or any other aligned
mmap,values = self._make_mmap(0x0c00000, 4096, self._struct_offset,
intervals, self.word_size)
# add a reference to mmap in mmap2
ammap2 = AMemoryMapping(0xff7dc000, 0xff7dc000+0x1000, '-rwx', 0, 0, 0, 0, 'test_mmap2')
ammap2.set_ctypes(self.target.get_target_ctypes())
mmap2 = LocalMemoryMapping.fromBytebuffer(ammap2, mmap.get_byte_buffer())
self._memory_handler = MemoryHandler([mmap, mmap2], self.target, 'test')
self.mmap2 = mmap2
return mmap, values
class TestPointerSearcher(TestPointer):
def test_iter(self):
matcher = haystack.reverse.matchers.PointerSearcher(self._memory_handler)
self.pointerSearcher = searchers.WordAlignedSearcher(self.mmap, matcher, self.feedback, self.word_size)
iters = [value for value in self.pointerSearcher]
values = self.pointerSearcher.search()
self.assertEqual(iters, values)
self.assertEqual(self.values, values)
self.assertEqual(self.values, iters)
class TestPointerEnumerator(TestPointer):
def test_iter(self):
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
self.pointerEnum = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap, matcher, self.feedback, self.word_size)
values = [value for offset, value in self.pointerEnum]
offsets = [offset for offset, value in self.pointerEnum]
values_2 = [value for offset, value in self.pointerEnum.search()]
offsets_2 = [offset for offset, value in self.pointerEnum.search()]
self.assertEqual(values, values_2)
self.assertEqual(offsets, offsets_2)
self.assertEqual(self.values, values)
self.assertEqual(self.values, values_2)
nsig = [self._mstart + self._struct_offset]
nsig.extend(self.seq)
indices = [i for i in self._accumulate(nsig)]
self.assertEqual(indices, offsets)
self.assertEqual(indices, offsets_2)
def test_iter_advanced(self):
"""test that pointers to other mappings are detected"""
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
self.pointerEnum1 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap, matcher, self.feedback, self.word_size)
offsets1, values1 = zip(*self.pointerEnum1.search())
self.pointerEnum2 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap2, matcher, self.feedback, self.word_size)
offsets2, values2 = zip(*self.pointerEnum2.search())
self.assertEqual(values1, values2)
self.assertEqual(len(values1), len(self.seq)+1)
class TestPointerEnumeratorReal(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._memory_handler = folder.load(zeus_856_svchost_exe.dumpname)
#cls._memory_handler = folder.load(putty_1_win7.dumpname)
cls._utils = cls._memory_handler.get_target_platform().get_target_ctypes_utils()
return
@classmethod
def tearDownClass(cls):
cls._utils = None
cls._memory_handler.reset_mappings()
cls._memory_handler = None
return
def setUp(self):
self._heap_finder = self._memory_handler.get_heap_finder()
return
def tearDown(self):
self._heap_finder = None
return
def _stats(self, heap_addrs):
# get the weight per mapping
mapdict = {}
for m in self._memory_handler.get_mappings():
mapdict[m.start] = 0
for addr in heap_addrs:
m = self._memory_handler.get_mapping_for_address(addr)
mapdict[m.start] += 1
res = [(v,k) for k,v, in mapdict.items()]
res.sort()
res.reverse()
print('Most used mappings:')
for cnt,s in res:
if cnt == 0:
continue
m = self._memory_handler.get_mapping_for_address(s)
print(cnt, m)
|
def test_pointer_enumerators(self):
"""
Search pointers values in one HEAP
:return:
"""
# prep the workers
dumpfilename = self._memory_handler.get_name()
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
        matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
walker = walkers[0]
heap_addr = walker.get_heap_address()
heap = walker.get_heap_mapping()
# create the enumerator on the whole mapping
enumerator1 = haystack.reverse.enumerators.WordAlignedEnumerator(heap, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts1 = timeit.timeit(enumerator1.search, number=3)
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum = enumerator1.search()
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
###
else:
heap_enum = enumerator1.search()
ts1 = 0.0
heap_addrs1, heap_values1 = zip(*heap_enum)
print('WordAlignedEnumerator: %d pointers, timeit %0.2f' % (len(heap_addrs1), ts1))
self._stats(heap_addrs1)
def test_pointer_enumerators_allocated(self):
"""
Search pointers values in allocated chunks from one HEAP
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
heap_walker = walkers[0]
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.AllocatedWordAlignedEnumerator(heap_walker, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts2 = timeit.timeit(enumerator2.search, number=3)
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum2 = enumerator2.search()
pr.disable()
s = StringIO.StringIO()
sor
|
Bedrock02/General-Coding
|
Search/Dijkstra/hheap.py
|
Python
|
mit
| 3,638
| 0.053051
|
class hheap(dict):
@staticmethod
def _parent(i): # please use bit operation (same below)!
return (i-1)>>1
@staticmethod
def _left(i):
return (i<<1) + 1
@staticmethod
def _right(i):
return (i<<1) + 2
    '''
    Structure is the following:
    every entry is a single list [position, value, key]. The heap keeps these
    lists ordered by value, and the dictionary maps key -> the same list, so
    position updates made during heap swaps are visible from both sides.
    '''
def __init__(self):
self.heap = []
self.hLength = -1
dict.__init__(self)
def __setitem__(self,key,value):
if dict.__contains__(self,key):
item = dict.__getitem__(self,key)
item[1] = value
if item[1] < self.heap[self._parent(item[0])][1]:
self.heapup(item[0])
else:
self.heapdown(item[0])
else:
self.hLength += 1
self.heap.append([self.hLength,value,key])
dict.__setitem__(self,key,self.heap[-1])
self.heapup(self.hLength)
def __getitem__(self,key):
'''Get item retrieves the value of the given key '''
if dict.__contains__(self,key):
return dict.__getitem__(self,key)[1]
raise KeyError("Key does not exist")
def heapup(self,index):
''' Maintains the property of a heap by checking its parent, mostly used after insertion'''
parent = self._parent(index)
        if parent == -1:
return
if self.heap[index][1] < self.heap[parent][1]:
self._swap(index,parent)
return self.heapup(parent)
if self.heap[index][1] == self.heap[parent][1]:
if self.heap[index][2] < self.heap[parent][2]:
self._swap(index,parent)
return self.heapup(parent)
return
    def heapdown(self,index=0):
''' Maintains the property of a heap by checking its children '''
leftChild = self._left(index)
rightChild = self._right(index)
last = len(self.heap)-1
if leftChild > last:
return
elif rightChild > last:
if self.heap[leftChild][1] < self.heap[index][1]:
self._swap(index,leftChild)
return self.heapdown(leftChild)
        else:
if self.heap[rightChild][1] < self.heap[leftChild][1]:
min = rightChild
else:
min = leftChild
if self.heap[index][1] > self.heap[min][1]:
self._swap(index,min)
if self.heap[index][1] == self.heap[min][1]:
if self.heap[index][2] > self.heap[min][2]:
self._swap(index,min)
return self.heapdown(min)
def _swap(self, i, j):
"""swap the contents b/w indices i and j; update hash accordingly"""
#swap within the heap
self.heap[i][0],self.heap[j][0] = j,i
self.heap[i],self.heap[j] = self.heap[j],self.heap[i]
def pop(self):
# pop root (best)
#display the soon to be popped item
popped = self.heap[0]
#remove from dict and heap
dict.__delitem__(self,popped[2])
self._swap(0,self.hLength)
self.heap.pop()
self.heapdown()
self.hLength-=1
return popped
def update_if_better(self, key, newvalue,viakey=None):
"""update if newvalue is better than the current value for key
or insert if key is not here yet."""
if dict.__contains__(self,key):
self[key] = min(self[key],newvalue)
info = dict.__getitem__(self,key)
if self[key] == newvalue:
                if len(info) == 3:
info.append(viakey)
else:
info[3] = viakey
else:
self[key] = newvalue
def Display(self,arry):
#print arry
        if len(arry) == 4:
print arry[2]+" "+str(arry[1])+ " (via "+arry[3]+")"
else:
if arry[1] == float("+inf"):
print str(arry[2])+" "+"unreachable"
else:
print str(arry[2])+" "+str(arry[1])
# def GenerateItems(self):
# for x in self.heap:
# yield x
def __str__(self):
string = "{"
string += ', '.join(["'" + item[0]+ "'" + ": "+str(item[1][1]) for item in sorted(self.items(),key = lambda x: x[1][1])])
string +="}"
return string
__repr__ = __str__
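# Usage sketch (illustrative keys/priorities, e.g. Dijkstra tentative distances):
if __name__ == "__main__":
    h = hheap()
    h["a"] = 5
    h["b"] = 2
    h.update_if_better("a", 1, viakey="b")   # 'a' improves from 5 to 1 via 'b'
    best = h.pop()                           # the smallest value is popped first
    h.Display(best)                          # prints: a 1 (via b)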
|
abersheeran/a2wsgi
|
script/version.py
|
Python
|
apache-2.0
| 509
| 0
|
import importlib
import os
import sys
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_version() -> str:
"""
Return version.
"""
sys.path.insert(0, here)
return importlib.import_module("a2wsgi").__version__
os.chdir(here)
os.system(f"poetry version {get_version()}")
os.system("git add a2wsgi/* pyproject.toml")
os.system(f'git commit -m "v{get_version()}"')
os.system("git push")
os.system("git tag v{0}".format(get_version()))
os.system("git push --tags")
|