| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
gr33ndata/19898 | nineteen898/twapi.py | Python | mit | 3,510 | 0.004843
import os
import json
import yaml
import base64
import requests
class TWAPI:
def __init__(self, config_file='../config.yml'):
#print os.getcwd()
self.config_file = config_file
self.key = ''
self.secret = ''
self.bearer = ''
self.load_conf()
#self.show_conf()
def load_conf(self):
fd = open(self.config_file, 'r')
conf = yaml.safe_load(fd) # safe_load avoids executing arbitrary YAML tags
self.key = conf['TwitterKey']
self.secret = conf['TwitterSecret']
fd.close()
self.get_bearer_token()
def show_conf(self):
print 'Twitter Key: ' + self.key
print 'Twitter Secret: ' + self.secret
print 'Twitter Bearer Token: ' + self.bearer
def get_bearer_token(self):
self.bearer = base64.b64encode('%s:%s' % (self.key, self.secret))
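# A minimal sketch of the app-only auth flow this class implements, using
# only names already defined in this file (nothing new is assumed):
#   credential = base64.b64encode('KEY:SECRET')        # get_bearer_token()
#   POST https://api.twitter.com/oauth2/token          # in get_tweets()/get_users()
#       Authorization: Basic <credential>
#       grant_type=client_credentials
#   -> JSON {"access_token": ...}, later sent as 'Bearer <token>' by search()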
def search(self, query='', token='', max_posts=100, max_id=0):
if max_posts == 0:
return []
count = min(max_posts, 100)
url = 'https://api.twitter.com/1.1/search/tweets.json'
headers = {
'Authorization': 'Bearer ' + token
}
payload = {
'result_type': 'recent',
'q': query,
'count': count,
'max_id': max_id
}
ret_data = []
r = requests.get(url, params=payload, headers=headers)
#print r, r.text
if r.status_code == 200:
ret_data = [status for status in r.json()["statuses"]]
if not ret_data: # guard: an empty page would make ret_data[-1] raise IndexError
return ret_data
max_id = int(ret_data[-1]["id"]) - 1
ret_data = ret_data + self.search(query=query, token=token, max_posts=max_posts-count, max_id=max_id)
return ret_data
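# Worked example of the max_id pagination above: search(max_posts=250)
# issues three requests of 100, 100 and 50 tweets; after each page, max_id
# becomes (lowest returned id - 1), so the next request resumes strictly
# below the tweets already seen.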
def users_info(self, users='', token='', include_entities=False):
url = 'https://api.twitter.com/1.1/users/lookup.json'
headers = {
'Authorization': 'Bearer ' + token
}
payload = {
'screen_name': users,
'include_entities': include_entities,
}
ret_data = []
r = requests.get(url, params=payload, headers=headers)
#print r, r.text
if r.status_code == 200:
ret_data = [user for user in r.json()]
return ret_data
def get_tweets(self, query='', count=100):
url = 'https://api.twitter.com/oauth2/token'
payload = {
'grant_type': 'client_credentials'
}
headers = {
'Authorization': 'Basic ' + self.bearer,
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
r = requests.post(url, data=payload, headers=headers)
if r.status_code == 200:
token = r.json()["access_token"]
return self.search(query=query, token=token, max_posts=count)
else:
print r.status_code
def get_users(self, users='', count=10):
url = 'https://api.twitter.com/oauth2/token'
payload = {
'grant_type': 'client_credentials'
}
headers = {
'Authorization': 'Basic ' + self.bearer,
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
r = requests.post(url, data=payload, headers=headers)
if r.status_code == 200:
token = r.json()["access_token"]
return self.users_info(users=users, token=token)
else:
print r.status_code
if __name__ == '__main__':
tw = TWAPI()
tw.get_tweets()
fiete201/qutebrowser | scripts/asciidoc2html.py | Python | gpl-3.0 | 11,655 | 0.000343
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Generate the html documentation based on the asciidoc files."""
from typing import List, Optional
import re
import os
import sys
import subprocess
import shutil
import tempfile
import argparse
import io
import pathlib
REPO_ROOT = pathlib.Path(__file__).resolve().parents[1]
DOC_DIR = REPO_ROOT / 'qutebrowser' / 'html' / 'doc'
sys.path.insert(0, str(REPO_ROOT))
from scripts import utils
class AsciiDoc:
"""Abstraction of an asciidoc subprocess."""
FILES = ['faq', 'changelog', 'contributing', 'quickstart', 'userscripts']
def __init__(self,
asciidoc: Optional[str],
asciidoc_python: Optional[str],
website: Optional[str]) -> None:
self._cmd: Optional[List[str]] = None
self._asciidoc = asciidoc
self._asciidoc_python = asciidoc_python
self._website = website
self._homedir: Optional[pathlib.Path] = None
self._themedir: Optional[pathlib.Path] = None
self._tempdir: Optional[pathlib.Path] = None
self._failed = False
def prepare(self) -> None:
"""Get the asciidoc command and create the homedir to use."""
self._cmd = self._get_asciidoc_cmd()
self._homedir = pathlib.Path(tempfile.mkdtemp())
self._themedir = self._homedir / '.asciidoc' / 'themes' / 'qute'
self._tempdir = self._homedir / 'tmp'
self._tempdir.mkdir(parents=True)
self._themedir.mkdir(parents=True)
def cleanup(self) -> None:
"""Clean up the temporary home directory for asciidoc."""
if self._homedir is not None and not self._failed:
shutil.rmtree(str(self._homedir))
def build(self) -> None:
"""Build either the website or the docs."""
if self._website:
self._build_website()
else:
self._build_docs()
self._copy_images()
def _build_docs(self) -> None:
"""Render .asciidoc files to .html sites."""
files = [((REPO_ROOT / 'doc' / '{}.asciidoc'.format(f)),
DOC_DIR / (f + ".html")) for f in self.FILES]
for src in (REPO_ROOT / 'doc' / 'help').glob('*.asciidoc'):
dst = DOC_DIR / (src.stem + ".html")
files.append((src, dst))
# patch image links to use local copy
replacements = [
("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-big.png",
"qute://help/img/cheatsheet-big.png"),
("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-small.png",
"qute://help/img/cheatsheet-small.png")
]
asciidoc_args = ['-a', 'source-highlighter=pygments']
for src, dst in files:
assert self._tempdir is not None # for mypy
modified_src = self._tempdir / src.name
with modified_src.open('w', encoding='utf-8') as moded_f, \
src.open('r', encoding='utf-8') as f:
for line in f:
for orig, repl in replacements:
line = line.replace(orig, repl)
moded_f.write(line)
self.call(modified_src, dst, *asciidoc_args)
def _copy_images(self) -> None:
"""Copy image files to qutebrowser/html/doc."""
print("Copying files...")
dst_path = DOC_DIR / 'img'
dst_path.mkdir(exist_ok=True)
for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']:
src = REPO_ROOT / 'doc' / 'img' / filename
dst = dst_path / filename
shutil.copy(str(src), str(dst))
def _build_website_file(self, root: pathlib.Path, filename: str) -> None:
"""Build a single website file."""
src = root / filename
assert self._website is not None # for mypy
dst = pathlib.Path(self._website)
dst = dst / src.parent.relative_to(REPO_ROOT) / (src.stem + ".html")
dst.parent.mkdir(exist_ok=True)
assert self._tempdir is not None # for mypy
modified_src = self._tempdir / src.name
shutil.copy(str(REPO_ROOT / 'www' / 'header.asciidoc'), modified_src)
outfp = io.StringIO()
header = modified_src.read_text(encoding='utf-8')
header += "\n\n"
with src.open('r', encoding='utf-8') as infp:
outfp.write("\n\n")
hidden = False
found_title = False
title = ""
last_line = ""
for line in infp:
line = line.rstrip()
if line == '// QUTE_WEB_HIDE':
assert not hidden
hidden = True
elif line == '// QUTE_WEB_HIDE_END':
assert hidden
hidden = False
elif line == "The Compiler <mail@qutebrowser.org>":
continue
elif re.fullmatch(r':\w+:.*', line):
# asciidoc field
continue
if not found_title:
if re.fullmatch(r'=+', line):
line = line.replace('=', '-')
found_title = True
title = last_line + " | qutebrowser\n"
title += "=" * (len(title) - 1)
elif re.fullmatch(r'= .+', line):
line = '==' + line[1:]
found_title = True
title = last_line + " | qutebrowser\n"
title += "=" * (len(title) - 1)
if not hidden:
outfp.write(line.replace(".asciidoc[", ".html[") + '\n')
last_line = line
current_lines = outfp.getvalue()
outfp.close()
modified_str = title + "\n\n" + header + current_lines
modified_src.write_text(modified_str, encoding='utf-8')
asciidoc_args = ['--theme=qute', '-a toc', '-a toc-placement=manual',
'-a', 'source-highlighter=pygments']
self.call(modified_src, dst, *asciidoc_args)
def _build_website(self) -> None:
"""Prepare and build the website."""
theme_file = REPO_ROOT / 'www' / 'qute.css'
assert self._themedir is not None # for mypy
shutil.copy(theme_file, self._themedir)
assert self._website is not None # for mypy
outdir = pathlib.Path(self._website)
for item_path in pathlib.Path(REPO_ROOT).rglob('*.asciidoc'):
if item_path.stem in ['header', 'OpenSans-License']:
continue
self._build_website_file(item_path.parent, item_path.name)
copy = {'icons': 'icons', 'doc/img': 'doc/img', 'www/media': 'media/'}
for src, dest in copy.items():
full_src = REPO_ROOT / src
full_dest = outdir / dest
try:
shutil.rmtree(full_dest)
except FileNotFoundError:
pass
shutil.copytree(full_src, full_dest)
for dst, link_name in [
('README.html', 'index.html'),
((pathlib.Path('doc') / 'quickstart.html'), 'quickstart.html'),
]:
assert isinstance(dst, (str, pathlib.Path)) # for mypy
try:
(outdir / link_name).symlink_to(dst)
except FileExistsError:
pass
DramaFever/sst | src/sst/selftests/by_xpath.py | Python | apache-2.0 | 989 | 0
import sst
import sst.actions
# xpath locator tests
#
# see: http://seleniumhq.org/docs/appendix_locating_techniques.html
sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT)
sst.actions.go_to('/')
sst.actions.get_element_by_xpath("//p[contains(@class, 'unique_class')]")
sst.actions.get_element_by_xpath("//a[contains(@id, 'band_link')]")
sst.actions.get_element_by_xpath("//a[starts-with(@id, 'the_band_l')]")
sst.actions.get_elements_by_xpath('//p')
sst.actions.get_elements_by_xpath("//p[contains(@class, 'some_class')]")
sst.actions.fails(
sst.actions.get_element_by_xpath, '//doesnotexist')
sst.actions.fails(
sst.actions.get_element_by_xpath, "//a[contains(@id, 'doesnotexist')]")
assert len(sst.actions.get_elements_by_xpath(
'//doesnotexist'
)) == 0
assert len(sst.actions.get_elements_by_xpath(
"//p[contains(@class, 'unique_class')]"
)) == 1
assert len(sst.actions.get_elements_by_xpath(
"//p[contains(@class, 'some_class')]"
)) == 2
mahirrudin/easy-octopress | windows/ez_setup.py | Python | mit | 11,369 | 0.002199
#!/usr/bin/env python
"""Bootstrap setuptools installation
To use setuptools in your package's setup.py, include this
file in the same directory and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
To require a specific version of setuptools, set a download
mirror, or use an alternate download directory, simply supply
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import tarfile
import optparse
import subprocess
import platform
import textwrap
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "2.1"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
to_dir = os.path.abspath(to_dir)
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir, download_delay)
except pkg_resources.VersionConflict as VC_err:
if imported:
msg = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""").format(VC_err=VC_err, version=version)
sys.stderr.write(msg)
sys.exit(2)
# otherwise, reload ok
del pkg_resources, sys.modules['pkg_resources']
return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
cmd = [
'powershell',
'-Command',
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
devnull = open(os.path.devnull, 'wb')
try:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except:
return False
finally:
devnull.close()
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
src = dst = None
try:
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(target, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = [
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
]
for dl in downloaders:
if dl.viable():
return dl
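# Sketch of how the chain above is used (mirrors download_setuptools below;
# the tarball name is illustrative only):
#   downloader = get_best_downloader()  # e.g. download_file_powershell on Windows
#   downloader(DEFAULT_URL + 'setuptools-2.1.tar.gz', 'setuptools-2.1.tar.gz')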
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15,
downloader_factory=get_best_downloader):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments
and returning a function for downloading a URL to a target.
"""
mattilyra/gensim | gensim/test/test_corpora_dictionary.py | Python | lgpl-2.1 | 12,494 | 0.000963
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Unit tests for the `corpora.Dictionary` class.
"""
from collections import Mapping
import logging
import unittest
import codecs
import os
import os.path
import scipy
import gensim
from gensim.corpora import Dictionary
from gensim.utils import to_utf8
from gensim.test.utils import get_tmpfile, common_texts
from six import PY3
from six.moves import zip
class TestDictionary(unittest.TestCase):
def setUp(self):
self.texts = common_texts
def testDocFreqOneDoc(self):
texts = [['human', 'interface', 'computer']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
# two docs
texts = [['human'], ['human']]
d = Dictionary(texts)
expected = {0: 2}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# three docs
texts = [['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 3}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# four docs
texts = [['human'], ['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 4}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
def testDocFreqForOneDocWithSeveralWord(self):
# two words
texts = [['human', 'cat']]
d = Dictionary(texts)
expected = {0: 1, 1: 1}
self.assertEqual(d.dfs, expected)
# three words
texts = [['human', 'cat', 'minors']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
def testBuild(self):
d = Dictionary(self.texts)
# Since we don't specify the order in which dictionaries are built,
# we cannot reliably test for the mapping; only the keys and values.
expected_keys = list(range(12))
expected_values = [2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
self.assertEqual(sorted(d.dfs.keys()), expected_keys)
self.assertEqual(sorted(d.dfs.values()), expected_values)
expected_keys = sorted([
'computer', 'eps', 'graph', 'human', 'interface',
'minors', 'response', 'survey', 'system', 'time', 'trees', 'user'
])
expected_values = list(range(12))
self.assertEqual(sorted(d.token2id.keys()), expected_keys)
self.assertEqual(sorted(d.token2id.values()), expected_values)
def testMerge(self):
d = Dictionary(self.texts)
f = Dictionary(self.texts[:3])
g = Dictionary(self.texts[3:])
f.merge_with(g)
self.assertEqual(sorted(d.token2id.keys()), sorted(f.token2id.keys()))
def testFilter(self):
d = Dictionary(self.texts)
d.filter_extremes(no_below=2, no_above=1.0, keep_n=4)
expected = {0: 3, 1: 3, 2: 3, 3: 3}
self.assertEqual(d.dfs, expected)
def testFilterKeepTokens_keepTokens(self):
# provide keep_tokens argument, keep the tokens given
d = Dictionary(self.texts)
d.filter_extremes(no_below=3, no_above=1.0, keep_tokens=['human', 'survey'])
expected = {'graph', 'trees', 'human', 'system', 'user', 'survey'}
self.assertEqual(set(d.token2id.keys()), expected)
def testFilterKeepTokens_unchangedFunctionality(self):
# do not provide keep_tokens argument, filter_extremes functionality is unchanged
d = Dictionary(self.texts)
d.filter_extremes(no_below=3, no_above=1.0)
expected = {'graph', 'trees', 'system', 'user'}
self.assertEqual(set(d.token2id.keys()), expected)
def testFilterKeepTokens_unseenToken(self):
# do provide keep_tokens argument with unseen tokens, filter_extremes functionality is unchanged
d = Dictionary(self.texts)
d.filter_extremes(no_below=3, no_above=1.0, keep_tokens=['unknown_token'])
expected = {'graph', 'trees', 'system', 'user'}
self.assertEqual(set(d.token2id.keys()), expected)
def testFilterMostFrequent(self):
d = Dictionary(self.texts)
d.filter_n_most_frequent(4)
expected = {0: 2, 1: 2, 2: 2, 3: 2, 4: 2, 5: 2, 6: 2, 7: 2}
self.assertEqual(d.dfs, expected)
def testFilterTokens(self):
self.maxDiff = 10000
d = Dictionary(self.texts)
removed_word = d[0]
d.filter_tokens([0])
expected = {
'computer': 0, 'eps': 8, 'graph': 10, 'human': 1,
'interface': 2, 'minors': 11, 'response': 3, 'survey': 4,
'system': 5, 'time': 6, 'trees': 9, 'user': 7
}
del expected[removed_word]
self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
expected[removed_word] = len(expected)
d.add_documents([[removed_word]])
self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
def test_doc2bow(self):
d = Dictionary([["žluťoučký"], ["žluťoučký"]])
# pass a utf8 string
self.assertEqual(d.doc2bow(["žluťoučký"]), [(0, 1)])
# doc2bow must raise a TypeError if passed a string instead of array of strings by accident
self.assertRaises(TypeError, d.doc2bow, "žluťoučký")
# unicode must be converted to utf8
self.assertEqual(d.doc2bow([u'\u017elu\u0165ou\u010dk\xfd']), [(0, 1)])
def test_saveAsText(self):
"""`Dictionary` can be saved as textfile. """
tmpf = get_tmpfile('save_dict_test.txt')
small_text = [
["prvé", "slovo"],
["slovo", "druhé"],
["druhé", "slovo"]
]
d = Dictionary(small_text)
d.save_as_text(tmpf)
with codecs.open(tmpf, 'r', encoding='utf-8') as file:
serialized_lines = file.readlines()
self.assertEqual(serialized_lines[0], u"3\n")
self.assertEqual(len(serialized_lines), 4)
# We do not know, which word will have which index
self.assertEqual(serialized_lines[1][1:], u"\tdruhé\t2\n")
self.assertEqual(serialized_lines[2][1:], u"\tprvé\t1\n")
self.assertEqual(serialized_lines[3][1:], u"\tslovo\t3\n")
d.save_as_text(tmpf, sort_by_word=False)
with codecs.open(tmpf, 'r', encoding='utf-8') as file:
serialized_lines = file.readlines()
self.assertEqual(serialized_lines[0], u"3\n")
self.assertEqual(len(serialized_lines), 4)
self.assertEqual(serialized_lines[1][1:], u"\tslovo\t3\n")
self.assertEqual(serialized_lines[2][1:], u"\tdruhé\t2\n")
self.assertEqual(serialized_lines[3][1:], u"\tprvé\t1\n")
def test_loadFromText_legacy(self):
"""
`Dictionary` can be loaded from textfile in legacy format.
Legacy format does not have num_docs on the first line.
"""
tmpf = get_tmpfile('load_dict_test_legacy.txt')
no_num_docs_serialization = to_utf8("1\tprvé\t1\n2\tslovo\t2\n")
with open(tmpf, "wb") as file:
file.write(no_num_docs_serialization)
d = Dictionary.load_from_text(tmpf)
self.assertEqual(d.token2id[u"prvé"], 1)
self.assertEqual(d.token2id[u"slovo"], 2)
self.assertEqual(d.dfs[1], 1)
self.assertEqual(d.dfs[2], 2)
self.assertEqual(d.num_docs, 0)
def test_loadFromText(self):
"""`Dictionary` can be loaded from textfile."""
tmpf = get_tmpfile('load_dict_test.txt')
no_num_docs_serialization = to_utf8("2\n1\tprvé\t1\n2\tslovo\t2\n")
with open(tmpf, "wb") as file:
file.write(no_num_docs_serialization)
d = Dictionary.load_from_text(tmpf)
self.assertEqual(d.token2id[u"prvé"], 1)
self.assertEqual(d.token2id[u"slovo"], 2)
self.assertEqual(d.num_docs, 2)
FRC900/2016VisionCode | preseason_edge_detection/edge_detection.py | Python | mit | 1,892 | 0.048626
# edge detection and colorspaces, includes laplacian and sobel filters that are tuned to the pink whiffle ball
import cv2
import numpy as np
def nothing(x):
pass
cap = cv2.VideoCapture(0)
cv2.namedWindow('frame1')
kernel = np.ones((5,5),np.uint8)
# create trackbars for color change, tuned to pink whiffle ball
# lower
cv2.createTrackbar('HLo','frame1',120,179,nothing)
cv2.createTrackbar('SLo','frame1',72,255,nothing)
cv2.createTrackbar('VLo','frame1',120,255,nothing)
# upper
cv2.createTrackbar('HUp','frame1',179,179,nothing)
cv2.createTrackbar('SUp','frame1',255,255,nothing)
cv2.createTrackbar('VUp','frame1',255,255,nothing)
while(1):
# Take each frame
_, frame = cap.read()
# get current positions of four trackbars
hLo = cv2.getTrackbarPos('HLo','frame1')
sLo = cv2.getTrackbarPos('SLo','frame1')
vLo = cv2.getTrackbarPos('VLo','frame1')
hUp = cv2.getTrackbarPos('HUp','frame1')
sUp = cv2.getTrackbarPos('SUp','frame1')
vUp = cv2.getTrackbarPos('VUp','frame1')
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of color in HSV
lower = np.array([hLo,sLo,vLo])
upper = np.array([hUp,sUp,vUp])
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower, upper)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame,frame, mask= mask)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
laplacian = cv2.Laplacian(closing,cv2.CV_64F)
sobelx = cv2.Sobel(closing,cv2.CV_64F,1,0,ksize=5)
sobely = cv2.Sobel(closing,cv2.CV_64F,0,1,ksize=5)
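# What the steps above do (standard OpenCV semantics, nothing beyond the
# calls already made): MORPH_OPEN erodes then dilates, removing speckle
# noise around the ball; MORPH_CLOSE dilates then erodes, filling small
# holes inside the blob. Sobel with (1,0)/(0,1) takes the first derivative
# along x/y using a 5x5 kernel; the Laplacian is the second derivative.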
cv2.imshow('frame',frame)
cv2.imshow('mask',mask)
cv2.imshow('res',res)
cv2.imshow('opening',opening)
cv2.imshow('closing',closing)
cv2.imshow('laplacian',laplacian)
cv2.imshow('sobelx',sobelx)
cv2.imshow('sobely',sobely)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
837468220/python-for-android | python3-alpha/python3-src/Lib/test/test_codecs.py | Python | apache-2.0 | 64,172 | 0.00173
from test import support
import unittest
import codecs
import locale
import sys, _testcapi, io
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0] # make empty
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
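# Doctest-style sketch of Queue (the byte strings are arbitrary examples):
#   >>> q = Queue(b"")
#   >>> q.write(b"abc")
#   >>> q.read(2)
#   b'ab'
#   >>> q.read()   # drains the remainder and empties the buffer
#   b'c'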
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
# Check that the condition stated in the documentation for
# IncrementalDecoder.getstate() holds
if not state[1]:
# reset decoder to the default state without anything buffered
d.setstate((state[0][:0], 0))
# Feeding the previous input may not produce any output
self.assertTrue(not d.decode(state[0]))
# The decoder must return to the same state
self.assertEqual(state, d.getstate())
# Create a new decoder and set it to the state
# we extracted from the old one
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
class ReadTest(unittest.TestCase, MixInCheckStateHandling):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using a incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# Check whether the reset method works properly
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate("\n \r\n \r \u2028".split()):
vw.append((i*200)*"\u3042" + lineend)
vwo.append((i*200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "".join(vw))
self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in "\n \r\n \r \u2028".split():
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
yotomyoto/benzene-vanilla | tournament/summary.py | Python | lgpl-3.0 | 9,376 | 0.005546
#!/usr/bin/python -u
#----------------------------------------------------------------------------
# Summarizes a twogtp tournament.
#
# TODO: - Simplify stuff. The table idea seems bad, in retrospect.
# - Do we really care about which openings are won/lost?
import os, sys, getopt, re, string
from statistics import Statistics
#----------------------------------------------------------------------------
def get_value(table, key):
if (table.has_key(key)):
return table[key]
return 0
def add_if_new(list, value):
if (list.count(value)==0):
list.append(value)
# Yes, I'm using globals. Yes, I'm lazy.
gamelen = Statistics()
elapsedP1 = Statistics()
elapsedP2 = Statistics()
p1Overtime = Statistics()
p2Overtime = Statistics()
p1Wins = Statistics()
p1WinsBlack = Statistics()
p1WinsWhite = Statistics()
def analyzeTourney(fname, longOpening, maxvalid, showTable, plotScore,
timeLimit, validOpenings):
print "Analyzing: \'" + fname + "\'..."
if plotScore:
pf = open("plot.dat", "w")
f = open(fname, "r")
line = f.readline()
linenum = 1
numvalid = 0
table = {}
opencount = {}
openings = []
progs = []
p1Timeouts = 0.0
p2Timeouts = 0.0
while line != "":
if line[0] != "#":
array = string.split(line, "\t")
fullopening = array[2]
black = array[3]
white = array[4]
bres = array[5]
wres = array[6]
length = array[7]
timeBlack = float(array[8])
timeWhite = float(array[9])
if longOpening:
opening = string.strip(fullopening)
else:
moves = string.split(string.strip(fullopening), ' ')
opening = moves[0]
considerGame = True
if (validOpenings != []):
if (not opening in validOpenings):
considerGame = False
# TODO: check that results are of the form "C+", where C
# is one of 'B' or 'W', instead of just checking the first
# character.
if (not considerGame):
print "Game not in valid openings"
elif ((numvalid < maxvalid) and (bres == wres) and
((bres[0] == 'B') or (bres[0] == 'W'))):
add_if_new(openings, opening)
add_if_new(progs, black)
add_if_new(progs, white)
colors = ['B', 'W']
names = [black, white]
winner = names[colors.index(bres[0])]
valueForP1 = 0.0
if (winner == progs[0]):
valueForP1 = 1.0
if (((timeBlack > timeLimit) and (bres[0] == 'B')) or
((timeWhite > timeLimit) and (bres[0] == 'W'))):
overtime = 0.0
if (bres[0] == 'B'):
overtime = timeBlack - timeLimit
else:
overtime = timeWhite - timeLimit
if (winner == progs[0]):
p1Timeouts = p1Timeouts + 1.0
p1Overtime.add(overtime)
else:
p2Timeouts = p2Timeouts + 1.0
p2Overtime.add(overtime)
gamelen.add(float(length))
p1Wins.add(valueForP1)
if (plotScore):
print >> pf, str(p1Wins.count()) + "\t" + str(p1Wins.mean()) + "\t" + str(p1Wins.stderror())
if (progs[0] == black):
p1WinsBlack.add(valueForP1)
else:
p1WinsWhite.add(valueForP1)
if (progs[0] == black):
elapsedP1.add(float(timeBlack))
elapsedP2.add(float(timeWhite))
else:
elapsedP1.add(float(timeWhite))
elapsedP2.add(float(timeBlack))
opencount[opening] = get_value(opencount, opening) + 1
for color in colors:
who = names[colors.index(color)]
if (bres[0] == color):
key = opening, color, who, 'win'
table[key] = get_value(table, key) + 1
else:
key = opening, color, who, 'loss'
table[key] = get_value(table, key) + 1
numvalid = numvalid + 1
elif (numvalid >= maxvalid):
print "Line " + str(linenum) + ": Past max game limit."
else:
print "Line " + str(linenum) + ": Ignoring bad game result."
line = f.readline()
linenum = linenum+1
f.close()
if (plotScore):
pf.close()
print ""
for p in progs:
print "p" + str(progs.index(p)+1)+ " = " + p
showIterativeResults(numvalid, table, opencount,
openings, progs, showTable,
p1Timeouts, p2Timeouts)
# for k in sorted(table.keys()):
# print k, table[k]
def showIterativeResults(numvalid, table, opencount, openings,
progs, showTable, p1Timeouts, p2Timeouts):
if showTable:
print "+-------------+--------+-------+-------+"
print "+ OPENING | COUNT | p1 | p2 |"
print "+-------------+--------+-------+-------+"
b1w = 0
b1l = 0
b2w = 0
b2l = 0
openings.sort()
for o in openings:
cb1w = get_value(table, (o, 'B', progs[0], 'win'))
cb1l = get_value(table, (o, 'B', progs[0], 'loss'))
cb2w = get_value(table, (o, 'B', progs[1], 'win'))
cb2l = get_value(table, (o, 'B', progs[1], 'loss'))
b1w = b1w + cb1w
b1l = b1l + cb1l
b2w = b2w + cb2w
b2l = b2l + cb2l
if showTable:
print "|%s \t\t%4d\t%3i/%i\t%3i/%i |" % \
(o, opencount[o], cb1w, cb1l, cb2w, cb2l)
if showTable:
print "+--------------------------------------+"
print "| \t\t \t%3i/%i\t%3i/%i |" % (b1w, b1l, b2w, b2l)
print "+--------------------------------------+"
if (numvalid != 0):
print
print "==========================================================="
print " NumGames: " + str(numvalid)
print " GameLen: " + gamelen.dump()
print " p1Time: " + elapsedP1.dump()
print " p2Time: " + elapsedP2.dump()
print "-----------------------------------------------------------"
print "Statistics for " + progs[0] + ":"
print " All Wins: %.1f%% (+-%.1f)" % \
(p1Wins.mean()*100.0, p1Wins.stderror()*100.0)
print " As Black: %.1f%% (+-%.1f)" % \
(p1WinsBlack.mean()*100.0, p1WinsBlack.stderror()*100.0)
print " As White: %.1f%% (+-%.1f)" % \
(p1WinsWhite.mean()*100.0, p1WinsWhite.stderror()*100.0)
if ((p1Timeouts > 0) or (p2Timeouts > 0)):
print "-----------------------------------------------------------"
print "Timeouts for " + progs[0] + ": %i/%i, %.1f (+-%.1f) max=%.1f" % \
(p1Timeouts, p1Wins.sum(),
p1Overtime.mean(), p1Overtime.stderror(), p1Overtime.max())
print "Timeouts for " + progs[1] + ": %i/%i, %.1f (+-%.1f) max=%.1f" % \
(p2Timeouts, p1Wins.count() - p1Wins.sum(),
p2Overtime.mean(), p2Overtime.stderror(), p2Overtime.max())
print "==========================================================="
else:
print "No valid games."
#------------------------------------------------------------------------------
def usage():
print "Usage: ./summary [--count c] [--showTable] [--time max time] --openings [openings file] --file [tournament.result]"
sys.exit(-1)
def main():
count = 50000
timeLimit = 123456.789
longOpening = False
showTable = False
PhonologicalCorpusTools/PolyglotDB | polyglotdb/corpus/lexical.py | Python | mit | 2,263 | 0.003093
from ..io.importer import lexicon_data_to_csvs, import_lexicon_csvs
from ..io.enrichment.lexical import enrich_lexicon_from_csv, parse_file
from .spoken import SpokenContext
class LexicalContext(SpokenContext):
"""
Class that contains methods for dealing specifically with words
"""
def enrich_lexicon(self, lexicon_data, type_data=None, case_sensitive=False):
"""
adds properties to lexicon, adds properties to hierarchy
Parameters
----------
lexicon_data : dict
the data in the lexicon
type_data : dict
defaults to None
case_sensitive : bool
defaults to False
"""
if type_data is None:
type_data = {k: type(v) for k, v in next(iter(lexicon_data.values())).items()}
removed = [x for x in type_data.keys() if self.hierarchy.has_type_property(self.word_name, x)]
type_data = {k: v for k,v in type_data.items() if k not in removed}
if not type_data:
return
lexicon_data_to_csvs(self, lexicon_data, case_sensitive=case_sensitive)
import_lexicon_csvs(self, type_data, case_sensitive=case_sensitive)
self.hierarchy.add_type_properties(self, self.word_name, type_data.items())
self.encode_hierarchy()
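# Hypothetical usage sketch; the property name 'frequency' is invented for
# illustration. lexicon_data maps word labels to property dicts, and the
# inference above would yield type_data == {'frequency': float}:
#   corpus.enrich_lexicon({'cat': {'frequency': 2.5}, 'dog': {'frequency': 1.0}})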
def enrich_lexicon_from_csv(self, path, case_sensitive=False):
"""
Enriches lexicon from a CSV file
Parameters
----------
path : str
the path to the csv file
case_sensitive : bool
Defaults to False
"""
enrich_lexicon_from_csv(self, path, case_sensitive)
def reset_lexicon_csv(self, path):
"""
Remove properties that were encoded via a CSV file
Parameters
----------
path : str
CSV file to get property names from
"""
data, type_data = parse_file(path, labels=[])
word = getattr(self, 'lexicon_' + self.word_name)
q = self.query_lexicon(word)
property_names = [x for x in type_data.keys()]
q.set_properties(**{x: None for x in property_names})
self.hierarchy.remove_type_properties(self, self.word_name, property_names)
self.encode_hierarchy()
mythmon/edwin | edwin/bundles.py | Python | mpl-2.0 | 584 | 0
class BundleConfiguration(object):
def PIPELINE_CSS(self):
return {
'client': {
'source_filenames': [
'font-awesome/css/font-awesome.css',
'css/client.less',
],
'output_filename': 'css/client.css',
},
}
def PIPELINE_JS(self):
return {
'client': {
'source_filenames': [
'js/client.browserify.js',
],
'output_filename': 'js/client.js',
},
}
|
mhbu50/erpnext | erpnext/accounts/doctype/accounts_settings/accounts_settings.py | Python | gpl-3.0 | 2,701 | 0.01666
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
from frappe.model.document import Document
from frappe.utils import cint
class AccountsSettings(Document):
def on_update(self):
frappe.clear_cache()
def validate(self):
frappe.db.set_default("add_taxes_from_item_tax_template",
self.get("add_taxes_from_item_tax_template", 0))
self.validate_stale_days()
self.enable_payment_schedule_in_print()
self.toggle_discount_accounting_fields()
def validate_stale_days(self):
if not self.allow_stale and cint(self.stale_days) <= 0:
frappe.msgprint(
_("Stale Days should start from 1."), title='Error', indicator='red',
raise_exception=1)
def enable_payment_schedule_in_print(self):
show_in_print = cint(self.show_payment_schedule_in_print)
for doctype in ("Sales Order", "Sales Invoice", "Purchase Order", "Purchase Invoice"):
make_property_setter(doctype, "due_date", "print_hide", show_in_print, "Check", validate_fields_for_doctype=False)
make_property_setter(doctype, "payment_schedule", "print_hide", 0 if show_in_print else 1, "Check", validate_fields_for_doctype=False)
def toggle_discount_accounting_fields(self):
enable_discount_accounting = cint(self.enable_discount_accounting)
for doctype in ["Sales Invoice Item", "Purchase Invoice Item"]:
make_property_setter(doctype, "discount_account", "hidden", not(enable_discount_accounting), "Check", validate_fields_for_doctype=False)
if enable_discount_accounting:
make_property_setter(doctype, "discount_account", "mandatory_depends_on", "eval: doc.discount_amount", "Code", validate_fields_for_doctype=False)
else:
make_property_setter(doctype, "discount_account", "mandatory_depends_on", "", "Code", validate_fields_for_doctype=False)
for doctype in ["Sales Invoice", "Purchase Invoice"]:
make_property_setter(doctype, "additional_discount_account", "hidden", not(enable_discount_accounting), "Check", validate_fields_for_doctype=False)
if enable_discount_accounting:
make_property_setter(doctype, "additional_discount_account", "mandatory_depends_on", "eval: doc.discount_amount", "Code", validate_fields_for_doctype=False)
else:
make_property_setter(doctype, "additional_discount_account", "mandatory_depends_on", "", "Code", validate_fields_for_doctype=False)
make_property_setter("Item", "default_discount_account", "hidden", not(enable_discount_accounting), "Check", validate_fields_for_doctype=False)
|
dbrattli/RxPY | examples/autocomplete/autocomplete.py | Python | apache-2.0 | 2,765 | 0.005787
"""
RxPY example running a Tornado server doing search queries against Wikipedia to
populate the autocomplete dropdown in the web UI. Start using
`python autocomplete.py` and navigate your web browser to http://localhost:8080
Uses the RxPY IOLoopScheduler (works on both Python 2.7 and 3.4)
"""
import os
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler, StaticFileHandler, Application, url
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import url_concat
from tornado.escape import json_encode, json_decode
from tornado import ioloop
from rx.subjects import Subject
from rx.concurrency import IOLoopScheduler
scheduler = IOLoopScheduler()
def search_wikipedia(term):
"""Search Wikipedia for a given term"""
url = 'http://en.wikipedia.org/w/api.php'
params = {
"action": 'opensearch',
"search | ": term,
"format": 'json'
}
# Must set a user agent for non-browser requests to Wikipedia
user_agent = "RxPY/1.0 (https://github.com/dbrattli/RxPY; dag@brattli.net) Tornado/4.0.1"
url = url_concat(url, params)
http_client = AsyncHTTPClient()
return http_client.fetch(url, method='GET', user_agent=user_agent)
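# For a term like "python", url_concat above yields (parameter order may vary):
#   http://en.wikipedia.org/w/api.php?action=opensearch&search=python&format=json
# fetch() returns a Future for the response, which is why search_wikipedia
# can be passed directly to flat_map_latest below.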
class WSHandler(WebSocketHandler):
def open(self):
print("WebSocket opened")
# A Subject is both an observable and observer, so we can both subscribe
# to it and also feed (on_next) it with new values
self.subject = Subject()
# Get all distinct key up events from the input and only fire if long enough and distinct
query = self.subject.map(
lambda x: x["term"]
).filter(
lambda text: len(text) > 2 # Only if the text is longer than 2 characters
).debounce(
0.750, # Pause for 750ms
scheduler=scheduler
).distinct_until_changed() # Only if the value has changed
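# Timeline sketch of the chain above: typing "p", "py", "pyt" within
# 750 ms emits nothing (each keystroke resets the debounce timer); once
# typing pauses, only the latest value "pyt" passes through, and
# distinct_until_changed drops it if it equals the previous query.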
searcher = query.flat_map_latest(search_wikipedia)
def send_response(x):
self.write_message(x.body)
def on_error(ex):
print(ex)
searcher.subscribe(send_response, on_error)
def on_message(self, message):
obj = json_decode(message)
self.subject.on_next(obj)
def on_close(self):
print("WebSocket closed")
class MainHandler(RequestHandler):
def get(self):
self.render("index.html")
def main():
port = os.environ.get("PORT", 8080)
app = Application([
url(r"/", MainHandler),
(r'/ws', WSHandler),
(r'/static/(.*)', StaticFileHandler, {'path': "."})
])
print("Starting server at port: %s" % port)
app.listen(port)
ioloop.IOLoop.current().start()
if __name__ == '__main__':
main()
gimli-org/gimli | pygimli/frameworks/methodManager.py | Python | apache-2.0 | 29,010 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Method Manager
Provide the end user interface for method (geophysical) dependent
modelling and inversion as well as data and model visualization.
"""
import numpy as np
import pygimli as pg
from pygimli.utils import prettyFloat as pf
def fit(funct, data, err=None, **kwargs):
"""Generic function fitter.
Fit data to a given function.
TODO
----
* Dictionary support for funct to submit user data..
Parameters
----------
funct: callable
Function with the first argument as data space, e.g., x, t, f, Nr. ..
Any following arguments are the parameters to be fit.
Except if a verbose flag is used.
data: iterable (float)
Data values
err: iterable (float) [None]
Data error values in %/100. Default is 1% if None are given.
Other Parameters
----------------
*dataSpace*: iterable
Keyword argument of the data space of len(data).
The name need to fit the first argument of funct.
Returns
-------
model: array
Fitted model parameter.
response: array
Model response.
Example
-------
>>> import pygimli as pg
>>>
>>> func = lambda t, a, b: a*np.exp(b*t)
>>> t = np.linspace(1, 2, 20)
>>> data = func(t, 1.1, 2.2)
>>> model, response = pg.frameworks.fit(func, data, t=t)
>>> print(pg.core.round(model, 1e-5))
2 [1.1, 2.2]
>>> _ = pg.plt.plot(t, data, 'o', label='data')
>>> _ = pg.plt.plot(t, response, label='response')
>>> _ = pg.plt.legend()
"""
mgr = ParameterInversionManager(funct, **kwargs)
model = mgr.invert(data, err, **kwargs)
return model, mgr.fw.response
# TG: harmonicFit does not really belong here as it is no curve fit
# We should rather use a class Decomposition
# Discuss .. rename to Framework or InversionFramework since he only manages
# the union of Inversion/Modelling and RegionManager(later)
class MethodManager(object):
"""General manager to maintenance a measurement method.
Method Managers are the interface for end-user interaction and can be seen as
simple but complete application classes which manage all tasks of
geophysical data processing.
The method manager holds one instance of a forward operator and an
appropriate inversion framework to handle modelling and data inversion.
Method Manager also helps with data import and export,
handle measurement data error estimation as well as model and data
visualization.
Attributes
----------
verbose : bool
Give verbose output.
debug : bool
Give debug output.
fop : :py:mod:`pygimli.frameworks.Modelling`
Forward Operator instance .. knows the physics.
fop is initialized by
:py:mod:`pygimli.manager.MethodManager.initForwardOperator`
and calls a valid
:py:mod:`pygimli.manager.MethodManager.createForwardOperator` method
in any derived classes.
inv : :py:mod:`pygimli.frameworks.Inversion`.
Inversion framework instance .. knows the reconstruction approach.
The attribute inv is initialized by default but can be changed
overwriting
:py:mod:`pygimli.manager.MethodManager.initInversionFramework`
"""
def __init__(self, fop=None, fw=None, data=None, **kwargs):
"""Constructor."""
self._fop = fop
self._fw = fw
# we hold our own copy of the data
self._verbose = kwargs.pop('verbose', False)
self._debug = kwargs.pop('debug', False)
self.data = None
if data is not None:
if isinstance(data, str):
self.load(data)
else:
self.data = data
# The inversion framework
self._initInversionFramework(verbose=self._verbose,
debug=self._debug)
# The forward operator is stored in self._fw
self._initForwardOperator(verbose=self._verbose, **kwargs)
# maybe obsolete
self.figs = {}
self.errIsAbsolute = False
def __hash__(self):
"""Create a hash for Method Manager."""
return pg.utils.strHash(str(type(self))) ^ hash(self.fop)
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, v):
self._verbose = v
self.fw.verbose = self._verbose
@property
def debug(self):
return self._debug
@debug.setter
def debug(self, v):
self._debug = v
self.fw.debug = self._debug
@property
def fw(self):
return self._fw
@property
def fop(self):
return self.fw.fop
@property
def inv(self):
return self.fw
@property
def model(self):
return self.fw.model
def reinitForwardOperator(self, **kwargs):
"""Reinitialize the forward operator.
Sometimes it can be useful to reinitialize the forward operator.
Keyword arguments will be forwarded to 'self.createForwardOperator'.
"""
self._initForwardOperator(**kwargs)
def _initForwardOperator(self, **kwargs):
"""Initialize or re-initialize the forward operator.
Called once in the constructor to force the manager to create the
necessary forward operator member. Can be recalled if you need to
change the manager's own forward operator object. If you want an own
instance of a valid FOP, call createForwardOperator.
"""
if self._fop is not None:
fop = self._fop
else:
fop = self.createForwardOperator(**kwargs)
if fop is None:
pg.critical("It seems that createForwardOperator method "
"does not return a valid forward operator.")
if self.fw is not None:
self.fw.reset()
self.fw.setForwardOperator(fop)
else:
pg.critical("No inversion framework defined.")
def createForwardOperator(self, **kwargs):
"""Mandatory interface for derived classes.
Here you need to specify which kind of forward operator FOP
you want to use.
This is called by any initForwardOperator() call.
Parameters
----------
**kwargs
Any arguments that are necessary for your FOP creation.
Returns
-------
Modelling
Instance of any kind of :py:mod:`pygimli.framework.Modelling`.
"""
pg.critical("No forward operator defined, either give one or "
"overwrite in derived class")
def _initInversionFramework(self, **kwargs):
"""Initialize or re-initialize the inversion framework.
Called once in the constructor to force the manager to create the
necessary Framework instance.
"""
self._fw = self.createInversionFramework(**kwargs)
if self.fw is None:
pg.critical("createInversionFramework does not return "
"valid inversion framework.")
def createInversionFramework(self, **kwargs):
"""Create default Inversion framework.
Derived classes may overwrite this method.
Parameters
----------
**kwargs
Any arguments that are necessary for your creation.
Returns
-------
Inversion
Instance of any kind of :py:mod:`pygimli.framework.Inversion`.
"""
if self._fw is None:
return pg.frameworks.Inversion(**kwargs)
else:
return self._fw
def load(self, fileName):
"""API, overwrite in derived classes."""
pg.critical('API, overwrite in derived classes', fileName)
def estimateError(self, data, errLevel=0.01, absError=None):
# TODO check, rel or abs in return.
"""Estimate data error.
Create an error of estimated measurement error.
On default it returns an array of constant relative errors.
More sophisticated error estimation should be done
in specialized derived classes.
Parameters
----------
data : iterable
Data values for which the errors should be estimated.
errLevel : float [0.01]
Relative error level in %/100.
absError : float [None]
Absolute error in the unit of the data.
"""
benatkin/tuneage | urls.py | Python | mit | 511 | 0.001957
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
import os
admin.autodiscover()
urlpatterns = patterns('',
('^$', 'django.views.generic.simple.redirect_to', {'url': '/admin/'}),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^admin/', include(admin.site.urls)),
url(
(r'^media/(.*)$',
'django.views.static.serve',
kwargs={'document_root': os.path.join(settings.PROJECT_PATH, 'media')}
),
)
berdario/RangeHTTPServer | RangeHTTPServer.py | Python | apache-2.0 | 4,567 | 0.000876
#!/usr/bin/python
'''
Use this in the same way as Python's http.server / SimpleHTTPServer:
python -m RangeHTTPServer [port]
The only difference from http.server / SimpleHTTPServer is that RangeHTTPServer supports
'Range:' headers to load portions of files. This is helpful for doing local web
development with genomic data files, which tend to be too large to load into the
browser all at once.
'''
import os
import re
import sys
import argparse
try:
from http.server import SimpleHTTPRequestHandler
import http.server as http_server
except ImportError:
from SimpleHTTPServer import SimpleHTTPRequestHandler
import SimpleHTTPServer as http_server
def copy_byte_range(infile, outfile, start=None, stop=None, bufsize=16*1024):
'''Like shutil.copyfileobj, but only copy a range of the streams.
Both start and stop are inclusive.
'''
if start is not None: infile.seek(start)
while 1:
to_read = min(bufsize, stop + 1 - infile.tell() if stop else bufsize)
buf = infile.read(to_read)
if not buf:
break
outfile.write(buf)
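# Doctest-style sketch (io.BytesIO stands in for a real file; bounds are
# inclusive, as the docstring says):
#   >>> import io
#   >>> src, dst = io.BytesIO(b"0123456789"), io.BytesIO()
#   >>> copy_byte_range(src, dst, 2, 5)
#   >>> dst.getvalue()
#   b'2345'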
BYTE_RANGE_RE = re.compile(r'bytes=(\d+)-(\d+)?$')
def parse_byte_range(byte_range):
'''Returns the two numbers in 'bytes=123-456' or throws ValueError.
The last number or both numbers may be None.
'''
if byte_range.strip() == '':
return None, None
m = BYTE_RANGE_RE.match(byte_range)
if not m:
raise ValueError('Invalid byte range %s' % byte_range)
first, last = [x and int(x) for x in m.groups()]
if last and last < first:
raise ValueError('Invalid byte range %s' % byte_range)
return first, last
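# Examples of the accepted forms (and the rejected shape):
#   >>> parse_byte_range('bytes=0-499')
#   (0, 499)
#   >>> parse_byte_range('bytes=500-')
#   (500, None)
#   >>> parse_byte_range('')
#   (None, None)
# parse_byte_range('bytes=500-100') raises ValueError (last < first).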
class RangeRequestHandler(SimpleHTTPRequestHandler):
"""Adds support for HTTP 'Range' requests to SimpleHTTPRequestHandler
The approach is to:
- Override send_head to look for 'Range' and respond appropriately.
- Override copyfile to only transmit a range when requested.
"""
def send_head(self):
if 'Range' not in self.headers:
self.range = None
return SimpleHTTPRequestHandler.send_head(self)
try:
self.range = parse_byte_range(self.headers['Range'])
except ValueError as e:
self.send_error(400, 'Invalid byte range')
return None
first, last = self.range
# Mirroring SimpleHTTPServer.py here
path = self.translate_path(self.path)
f = None
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, 'File not found')
return None
fs = os.fstat(f.fileno())
file_len = fs[6]
if first >= file_len:
self.send_error(416, 'Requested Range Not Satisfiable')
return None
self.send_response(206)
self.send_header('Content-type', ctype)
self.send_header('Accept-Ranges', 'bytes')
if last is None or last >= file_len:
last = file_len - 1
response_length = last - first + 1
self.send_header('Content-Range',
'bytes %s-%s/%s' % (first, last, file_len))
self.send_header('Content-Length', str(response_length))
self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def copyfile(self, source, outputfile):
if not self.range:
return SimpleHTTPRequestHandler.copyfile(self, source, outputfile)
# SimpleHTTPRequestHandler uses shutil.copyfileobj, which doesn't let
# you stop the copying before the end of the file.
start, stop = self.range # set in send_head()
copy_byte_range(source, outputfile, start, stop)
if __name__ == '__main__':
if sys.version_info[0] == 2:
http_server.test(HandlerClass=RangeRequestHandler)
# Python2's SimpleHTTPServer.test doesn't support bind and port args
else:
parser = argparse.ArgumentParser()
parser.add_argument('--bind', '-b', default='', metavar='ADDRESS',
help='Specify alternate bind address '
'[default: all interfaces]')
parser.add_argument('port', action='store',
default=8000, type=int,
nargs='?',
help='Specify alternate port [default: 8000]')
args = parser.parse_args()
http_server.test(HandlerClass=RangeRequestHandler, port=args.port, bind=args.bind)
|
MicroPyramid/Django-CRM | teams/models.py | Python | mit | 1,068 | 0 | import arrow
from django.db import models
from common.models import Org, Profile
from django.utils.translation import ugettext_lazy as _
class Teams(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
users = models.ManyToManyField(Profile, related_name="user_teams")
    created_on = models.DateTimeField(_("Created on"), auto_now_add=True)
created_by = models.ForeignKey(
Profile,
related_name="teams_created",
blank=True,
null=True,
on_delete=models.SET_NULL,
)
org = models.ForeignKey(
Org, on_delete=models.SET_NULL, null=True, blank=True
)
class Meta:
ordering = ("id",)
def __str__(self):
return self.name
@property
def created_on_arrow(self):
return arrow.get(self.created_on).humanize()
def get_users(self):
return ",".join(
[str(_id) for _id in list(self.users.values_list("id", flat=True))]
)
# return ','.join(list(self.users.values_list('id', flat=True)))
|
444thLiao/VarappX | varapp/migrations/0001_initial.py | Python | gpl-3.0 | 10,400 | 0.002788 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-11 12:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GeneDetailed',
fields=[
('uid', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('chrom', models.TextField(blank=True, null=True)),
('gene', models.TextField(blank=True, null=True)),
('is_hgnc', models.NullBooleanField()),
('ensembl_gene_id', models.TextField(blank=True, null=True)),
('transcript', models.TextField(blank=True, null=True)),
('biotype', models.TextField(blank=True, null=True)),
('transcript_status', models.TextField(blank=True, null=True)),
('ccds_id', models.TextField(blank=True, null=True)),
('hgnc_id', models.TextField(blank=True, null=True)),
('entrez_id', models.TextField(blank=True, null=True)),
('cds_length', models.TextField(blank=True, null=True)),
('protein_length', models.TextField(blank=True, null=True)),
('transcript_start', models.TextField(blank=True, null=True)),
('transcript_end', models.TextField(blank=True, null=True)),
('strand', models.TextField(blank=True, null=True)),
('synonym', models.TextField(blank=True, null=True)),
('rvis_pct', models.TextField(blank=True, null=True)),
('mam_phenotype_id', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'gene_detailed',
'managed': False,
},
),
migrations.CreateModel(
name='GeneSummary',
fields=[
('uid', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('chrom', models.TextField(blank=True, null=True)),
('gene', models.TextField(blank=True, null=True)),
('is_hgnc', models.NullBooleanField()),
('ensembl_gene_id', models.TextField(blank=True, null=True)),
('hgnc_id', models.TextField(blank=True, null=True)),
('transcript_min_start', models.TextField(blank=True, null=True)),
('transcript_max_end', models.TextField(blank=True, null=True)),
('strand', models.TextField(blank=True, null=True)),
('synonym', models.TextField(blank=True, null=True)),
('rvis_pct', models.TextField(blank=True, null=True)),
('mam_phenotype_id', models.TextField(blank=True, null=True)),
('in_cosmic_census', models.NullBooleanField()),
],
options={
'db_table': 'gene_summary',
'managed': False,
},
),
migrations.CreateModel(
name='Resources',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(blank=True, null=True)),
('resource', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'resources',
'managed': False,
},
),
migrations.CreateModel(
name='SampleGenotypeCounts',
fields=[
('sample_id', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('num_hom_ref', models.IntegerField(blank=True, null=True)),
('num_het', models.IntegerField(blank=True, null=True)),
('num_hom_alt', models.IntegerField(blank=True, null=True)),
('num_unknown', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'sample_genotype_counts',
'managed': False,
},
),
migrations.CreateModel(
name='SampleGenotypes',
fields=[
('sample_id', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('gt_types', models.BinaryField(blank=True, null=True)),
],
options={
'db_table': 'sample_genotypes',
'managed': False,
},
),
migrations.CreateModel(
name='Samples',
fields=[
('sample_id', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('family_id', models.TextField(blank=True, null=True)),
('name', models.TextField(blank=True, null=True, unique=True)),
('paternal_id', models.TextField(blank=True, null=True)),
('maternal_id', models.TextField(blank=True, null=True)),
('sex', models.TextField(blank=True, null=True)),
('phenotype', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'samples',
'managed': False,
},
),
migrations.CreateModel(
name='Variants',
fields=[
('chrom', models.TextField(blank=True)),
('start', models.IntegerField(blank=True, db_column='start', null=True)),
('end', models.IntegerField(blank=True, null=True)),
('variant_id', models.IntegerField(blank=True, primary_key=True, serialize=False)),
('ref', models.TextField(blank=True)),
('alt', models.TextField(blank=True)),
('quality', models.FloatField(blank=True, db_column='qual', null=True)),
('pass_filter', models.TextField(blank=True, db_column='filter')),
('gts_blob', models.BinaryField(blank=True, db_column='gts', null=True)),
('gt_types_blob', models.BinaryField(blank=True, db_column='gt_types', null=True)),
('in_dbsnp', models.NullBooleanField()),
('dbsnp', models.TextField(blank=True, db_column='rs_ids')),
('clinvar_sig', models.TextField(blank=True)),
('clinvar_disease_acc', models.TextField(blank=True)),
                ('gerp_bp_score', models.FloatField(blank=True, null=True)),
('gerp_element_pval', models.FloatField(blank=True, null=True)),
('gene_symbol', models.TextField(blank=True, db_column='gene')),
('transcript', models.TextField(blank=True)),
                ('exon', models.TextField(blank=True)),
                ('is_exonic', models.NullBooleanField()),
('is_coding', models.NullBooleanField()),
('is_lof', models.NullBooleanField()),
('codon_change', models.TextField(blank=True)),
('aa_change', models.TextField(blank=True)),
('impact', models.TextField(blank=True)),
('impact_so', models.TextField(blank=True)),
('impact_severity', models.TextField(blank=True)),
('polyphen_pred', models.TextField(blank=True)),
('polyphen_score', models.FloatField(blank=True)),
('sift_pred', models.TextField(blank=True, null=True)),
('sift_score', models.FloatField(blank=True, null=True)),
('read_depth', models.IntegerField(blank=True, db_column='depth', null=True)),
('rms_map_qual', models.FloatField(blank=True, null=True)),
('qual_depth', models.FloatField(blank=True, null=True)),
('allele_count', models.IntegerField(blank=True, null=True)),
('cadd_raw', models.FloatField(blank=True, null=True)),
('cadd_scaled', models.FloatField(blank=True, null=True)),
('in_esp', models.NullBooleanField()),
('in_1kg', models.NullBooleanField()),
|
Valloric/ycmd | update_api_docs.py | Python | gpl-3.0 | 1,822 | 0.036224 | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import platform
import sys
import subprocess
DIR_OF_THIS_SCRIPT = os.path.dirname( os.path.abspath( __file__ ) )
DIR_OF_DOCS = os.path.join( DIR_OF_THIS_SCRIPT, 'docs' )
def OnWindows():
return platform.system() == 'Windows'
# On Windows, distutils.spawn.find_executable only works for .exe files
# but .bat and .cmd files are also executables, so we use our own
# implementation.
def FindExecutable( executable ):
# Executable extensions used on Windows
WIN_EXECUTABLE_EXTS = [ '.exe', '.bat', '.cmd' ]
paths = os.environ[ 'PATH' ].split( os.pathsep )
base, extension = os.path.splitext( executable )
if OnWindows() and extension.lower() not in WIN_EXECUTABLE_EXTS:
extensions = WIN_EXECUTABLE_EXTS
else:
extensions = [ '' ]
for extension in extensions:
executable_name = executable + extension
if not os.path.isfile( executable_name ):
for path in paths:
executable_path = os.path.join( path, executable_name )
if os.path.isfile( executable_path ):
return executable_path
else:
return executable_name
return None
def GenerateApiDocs():
npm = FindExecutable( 'npm' )
if not npm:
sys.exit( 'ERROR: NPM is required to generate API docs.' )
os.chdir( os.path.join( DIR_OF_DOCS ) )
subprocess.call( [ npm, 'install', '--production' ] )
bootprint = FindExecutable( os.path.join( DIR_OF_DOCS, 'node_modules',
'.bin', 'bootprint' ) )
  api = os.path.join( DIR_OF_DOCS, 'openapi.yml' )
  subprocess.call( [ bootprint, 'openapi', api, DIR_OF_DOCS ] )
if __name__ == '__main__':
GenerateApiDocs()
|
RGD2/swapforth | esp8266/esptool2.py | Python | bsd-3-clause | 30,932 | 0.004364 | #!/usr/bin/env python
#
# ESP8266 ROM Bootloader Utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2014 Fredrik Ahlberg
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import struct
import serial
import time
import argparse
import os
import subprocess
import tempfile
import array
sys.path.append("../shell")
import swapforth
class TetheredESP(swapforth.TetheredTarget):
def open_ser(self, port, speed):
self.ser = port
def reset(self):
while 1:
c = self.ser.read(1)
if c == b'\x1e':
break
sys.stdout.write(c)
sys.stdout.flush()
print 'ESCAPED'
return
# time.sleep(1)
self.ser.write("words\n1 2 + .x\nwords\n")
self.ser.flush();
while 1:
c = self.ser.read(1)
sys.stdout.write(c)
sys.stdout.flush()
def boot(self, bootfile = None):
sys.stdout.write('Contacting... ')
self.reset()
print('established')
def interrupt(self):
self.reset()
def serialize(self):
l = self.command_response('0 here dump')
lines = l.strip().replace('\r', '').split('\n')
s = []
for l in lines:
l = l.split()
s += [int(b, 16) for b in l[1:17]]
s = array.array('B', s).tostring().ljust(32768, chr(0xff))
return array.array('i', s)
class ESPROM:
# These are the currently known commands supported by the ROM
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
# Maximum block sized for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
ESP_FLASH_BLOCK = 0x400
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
# Sflash stub: an assembly routine to read from spi flash and send to host
SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \
"\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \
"\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \
"\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00"
def __init__(self, port=0, baud=ESP_ROM_BAUD):
self._port = serial.Serial(port)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446
self._port.baudrate = baud
""" Read bytes from the serial port while performing SLIP unescaping """
def read(self, length=1):
b = ''
while len(b) < length:
c = self._port.read(1)
if c == '\xdb':
c = self._port.read(1)
if c == '\xdc':
b = b + '\xc0'
elif c == '\xdd':
b = b + '\xdb'
else:
raise FatalError('Invalid SLIP escape')
else:
b = b + c
return b
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = '\xc0' \
+ (packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc')) \
+ '\xc0'
self._port.write(buf)
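    # Illustrative SLIP framing (added comment, not in the original
    # source): write('\x01\xdb\xc0') puts the following on the wire:
    #   '\xc0' + '\x01' + '\xdb\xdd' + '\xdb\xdc' + '\xc0'
    # 0xdb escapes to 0xdb 0xdd, 0xc0 escapes to 0xdb 0xdc, and raw 0xc0
    # bytes frame the packet.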
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state=ESP_CHECKSUM_MAGIC):
for b in data:
state ^= ord(b)
return state
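    # Worked example (added comment, not in the original source):
    #   checksum('\x01\x02') == 0xef ^ 0x01 ^ 0x02 == 0xec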
""" Send a request and read the response """
def command(self, op=None, data=None, chk=0):
if op:
pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
# tries to get a response until that response has the
        # same operation as the request or a retries limit has been
        # exceeded. This is needed for some esp8266s that
# reply with more sync responses than expected.
retries = 100
while retries > 0:
(op_ret, val, body) = self.receive_response()
if op is None or op_ret == op:
return val, body # valid response received
retries = retries - 1
raise FatalError("Response doesn't match request")
""" Receive a response to a command """
def receive_response(self):
# Read header of response and parse
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet')
hdr = self.read(8)
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr)
if resp != 0x01:
            raise FatalError('Invalid response 0x%02x to command' % resp)
# The variable-length body
body = self.read(len_ret)
# Terminating byte
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet')
return op_ret, val, body
""" Perform a connection test """
def sync(self):
self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20' + 32 * '\x55')
for i in xrange(7):
self.command()
""" Try connecting repeatedly until successful, or giving up """
def connect(self):
print 'Connecting...'
for _ in xrange(4):
# issue reset-to-bootloader:
# RTS = either CH_PD or nRESET (both active low = chip in reset)
# DTR = GPIO0 (active low = boot to flasher)
self._port.setDTR(False)
self._port.setRTS(True)
time.sleep(0.05)
self._port.setDTR(True)
self._port.setRTS(False)
time.sleep(0.05)
self._port.setDTR(False)
# worst-case latency timer should be 255ms (probably <20ms)
self._port.timeout = 0.3
for _ in xrange(4):
try:
self._port.flushInput()
self._port.flushOutput()
self.sync()
self._port.timeout = 5
return
except:
time.sleep(0.05)
raise FatalError('Failed to connect to ESP8266')
""" Read memory address in target """
def read_reg(self, addr):
res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr))
if res[1] != "\0\0":
raise FatalError('Failed to read target memory')
return res[0]
""" Write to memory address in target """
def write_reg(self, addr, value, mask, delay_us=0):
if self.command(ESPROM.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0":
raise FatalError('Failed to write target memory')
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
if self.command(ESPROM.ESP_MEM_BEGIN,
                        struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0":
            raise FatalError('Failed to enter RAM download mode')
sstoma/CellProfiler | cellprofiler/modules/trackobjects.py | Python | gpl-2.0 | 137,657 | 0.005688 | from cellprofiler.gui.help import USING_METADATA_HELP_REF, USING_METADATA_GROUPING_HELP_REF, LOADING_IMAGE_SEQ_HELP_REF
TM_OVERLAP = 'Overlap'
TM_DISTANCE = 'Distance'
TM_MEASUREMENTS = 'Measurements'
TM_LAP = "LAP"
TM_ALL = [TM_OVERLAP, TM_DISTANCE, TM_MEASUREMENTS,TM_LAP]
LT_NONE = 0
LT_PHASE_1 = 1
LT_SPLIT = 2
LT_MITOSIS = 3
LT_GAP = 4
KM_VEL = 1
KM_NO_VEL = 0
KM_NONE = -1
'''Random motion model, for instance Brownian motion'''
M_RANDOM = "Random"
'''Velocity motion model, object position depends on prior velocity'''
M_VELOCITY = "Velocity"
'''Random and velocity models'''
M_BOTH = "Both"
RADIUS_STD_SETTING_TEXT = 'Number of standard deviations for search radius'
RADIUS_LIMIT_SETTING_TEXT = 'Search radius limit, in pixel units (Min,Max)'
ONLY_IF_2ND_PHASE_LAP_TEXT = '''<i>(Used only if the %(TM_LAP)s tracking method is applied and the second phase is run)</i>'''%globals()
import cellprofiler.icons
from cellprofiler.gui.help import PROTIP_RECOMEND_ICON, PROTIP_AVOID_ICON, TECH_NOTE_ICON
__doc__ = """
<b>Track Objects</b> allows tracking objects throughout sequential
frames of a series of images, so that from frame to frame
each object maintains a unique identity in the output measurements
<hr>
This module must be placed downstream of a module that identifies objects
(e.g., <b>IdentifyPrimaryObjects</b>). <b>TrackObjects</b> will associate each
object with the same object in the frames before and after. This allows the study
of objects' lineages and the timing and characteristics of dynamic events in
movies.
<p>Images in CellProfiler are processed sequentially by frame (whether loaded as a
series of images or a movie file). To process a collection of images/movies,
you will need to do the following:
<ul>
<li>Define each individual movie using metadata
either contained within the image file itself or as part of the images nomenclature
or folder structure. %(USING_METADATA_HELP_REF)s.</li>
<li>Group the movies to make sure
that each image sequence is handled individually. %(USING_METADATA_GROUPING_HELP_REF)s.
</li>
</ul>
For complete details, see <i>%(LOADING_IMAGE_SEQ_HELP_REF)s</i>.</p>
<p>For an example pipeline using TrackObjects, see the CellProfiler
<a href="http://www.cellprofiler.org/examples.shtml#Tracking">Examples</a> webpage.</p>
<h4>Available measurements</h4>
<b>Object measurements</b>
<ul>
<li><i>Label:</i> Each tracked object is assigned a unique identifier (label).
Child objects resulting from a split or merge are assigned the label of the ancestor.</li>
<li><i>ParentImageNumber, ParentObjectNumber:</i> The <i>ImageNumber</i> and
<i>ObjectNumber</i> of the parent object in the prior frame. For a split, each
child object will have the label of the object it split from. For a merge,
the child will have the label of the closest parent.</li>
<li><i>TrajectoryX, TrajectoryY:</i> The direction of motion (in x and y coordinates) of the
object from the previous frame to the current frame.</li>
<li><i>DistanceTraveled:</i> The distance traveled by the object from the
previous frame to the current frame (calculated as the magnitude of
the trajectory vectors).</li>
<li><i>Displacement:</i> The shortest distance traveled by the object from its
initial starting position to the position in the current frame. That is, it is
the straight-line path between the two points.</li>
<li><i>IntegratedDistance:</i> The total distance traveled by the object during
the lifetime of the object.</li>
<li><i>Linearity:</i> A measure of how linear the object trajectory is during the
object lifetime. Calculated as (displacement from initial to final
location)/(integrated object distance). Value is in range of [0,1].</li>
<li><i>Lifetime:</i> The number of frames an objects has existed. The lifetime starts
at 1 at the frame when an object appears, and is incremented with each frame that the
object persists. At the final frame of the image set/movie, the
lifetimes of all remaining objects are output.</li>
<li><i>FinalAge:</i> Similar to <i>LifeTime</i> but is only output at the final
frame of the object's life (or the movie ends, whichever comes first). At this point,
the final age of the object is output; no values are stored for earlier frames.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value
is useful if you want to plot a histogram of the object lifetimes; all but the final age
can be ignored or filtered out.</dd>
</dl></li>
</ul>
The following object measurements are specific to the %(TM_LAP)s tracking method:
<ul>
<li><i>LinkType:</i> The linking method used to link the object to its parent.
Possible values are
<ul>
<li><b>%(LT_NONE)d</b>: The object was not linked to a parent.</li>
<li><b>%(LT_PHASE_1)d</b>: The object was linked to a parent in the previous frame.</li>
<li><b>%(LT_SPLIT)d</b>: The object is linked as the start of a split path.</li>
<li><b>%(LT_MITOSIS)s</b>: The object was linked to its parent as a daughter of
a mitotic pair.</li>
<li><b>%(LT_GAP)d</b>: The object was linked to a parent in a frame prior to the
previous frame (a gap).</li>
</ul>
Under some circumstances, multiple linking methods may apply to a given object, e.g, an
object may be both the beginning of a split path and not have a parent. However, only
one linking method is assigned.</li>
<li><i>MovementModel:</i>The movement model used to track the object.
<ul>
<li><b>%(KM_NO_VEL)d</b>: The <i>%(M_RANDOM)s</i> model was used.</li>
<li><b>%(KM_VEL)d</b>: The <i>%(M_VELOCITY)s</i> model was used.</li>
<li><b>-1</b>: Neither model was used. This can occur under two circumstances:
<ul>
<li>At the beginning of a trajectory, when there is no data to determine the model as
yet.</li>
<li>At the beginning of a closed gap, since a model was not actually applied to make
the link in the first phase.</li>
</ul></li>
</ul>
</li>
<li><i>LinkingDistance:</i>The difference between the propagated position of an
object and the object to which it is matched.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> A slowly decaying histogram of
these distances indicates that the search radius is large enough. A cut-off histogram
is a sign that the search radius is too small.</dd>
</dl></li>
<li><i>StandardDeviation:</i>The Kalman filter maintains a running estimate
of the variance of the error in estimated position for each model.
This measurement records the linking distance divided by the standard deviation
of the error when linking the object with its parent.
<dl>
<dd><img src="memory:%(PROTIP_RECOMEND_ICON)s"> This value is multiplied by
the <i>"%(RADIUS_STD_SETTING_TEXT)s"</i> setting to constrain the search distance.
A histogram of this value can help determine if the <i>"%(RADIUS_LIMIT_SETTING_TEXT)s"</i>
setting is appropriate.</dd>
</dl>
</li>
<li><i>GapLength:</i> The number of frames between an object and its parent.
For instance, an object in frame 3 with a parent in frame 1 has a gap length of
2.</li>
<li><i>GapScore:</i> If an object is linked to its parent by bridging a gap,
this value is the score for the gap.</li>
<li><i>SplitScore:</i> If an object linked to its parent via a split, this
value is the score for the split.</li>
<li><i>MergeScore:</i> If an object linked to a child via a merge, this value is
the score for the merge.</li>
<li><i>MitosisScore:</i> If an object linked to two children via a mitosis,
this value is the score for the mitosis.</li>
</ul>
<b>Image measurements</b>
<ul>
<li><i>LostObjectCount:</i> Number of objects that appear in the previous frame
but have no identifiable child in the current frame.</li>
<li><i>NewObjectCount:</i> Number of objects that appear in the current frame but
have no identifiable parent in the previous frame. </li>
<li><i>SplitObjectCount:</i> Number of objects in the current frame that
resulted from a split from a parent object in the previous frame.</li>
<li><i>MergedObjectCount:</i> Number of objects in the current frame that
resulted from the merging of child objects in the previous frame.</li>
</ul>
See also: Any of the <b>Measure</b> modules, <b>IdentifyPrimaryObjects</b>, <b>Groups</b>.
"""%globals()
# CellProfiler is distributed under |
katchengli/tech-interview-prep | interview_cake/ic24.py | Python | apache-2.0 | 777 | 0.005148 | class LinkedListNode:
def __init__(self, value):
self.value = value
self.next = None
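# Iterative in-place reversal (explanatory comment, not in the original
# file): walk the list once, re-pointing each node at the node visited
# before it; `secondNew` trails one node behind `newHead`, so the whole
# reversal runs in O(n) time with O(1) extra space.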
def reverseLinkedList(head):
originalPointer = head
newHead = None
secondNew = None
while originalPointer != None:
newHead = originalPointer
originalPointer = originalPointer.next
newHead.next = secondNew
secondNew = newHead
return newHead
node1 = LinkedListNode(1)
node2 = LinkedListNode(2)
node3 = LinkedListNode(3)
node4 = LinkedListNode(4)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = None
newHead = reverseLinkedList(node1)
while newHead != None:
print(newHead.value)
    newHead = newHead.next
print(reverseLinkedList(None))
print(reverseLinkedList(LinkedListNode(6)).value)
|
mxOBS/deb-pkg_trusty_chromium-browser | v8/tools/push-to-trunk/bump_up_version.py | Python | bsd-3-clause | 7,906 | 0.006957 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script for auto-increasing the version on bleeding_edge.
The script can be run regularly by a cron job. It will increase the build
level of the version on bleeding_edge if:
- the lkgr version is smaller than the version of the latest revision,
- the lkgr version is not a version change itself,
- the tree is not closed for maintenance.
The new version will be the maximum of the bleeding_edge and trunk versions +1.
E.g. latest bleeding_edge version: 3.22.11.0 and latest trunk 3.23.0.0 gives
the new version 3.23.1.0.
This script requires a depot tools git checkout. I.e. 'fetch v8'.
"""
import argparse
import os
import sys
from common_includes import *
VERSION_BRANCH = "auto-bump-up-version"
# TODO(machenbach): Add vc interface that works on git mirror.
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
# TODO(machenbach): Remove after the git switch.
if(self.Config("PERSISTFILE_BASENAME") ==
"/tmp/v8-bump-up-version-tempfile"):
print "This script is disabled until after the v8 git migration."
return True
# Check for a clean workdir.
if not self.GitIsWorkdirClean(): # pragma: no cover
# This is in case a developer runs this script on a dirty tree.
self.GitStash()
self.GitCheckout("master")
self.GitPull()
# Ensure a clean version branch.
self.DeleteBranch(VERSION_BRANCH)
class GetCurrentBleedingEdgeVersion(Step):
MESSAGE = "Get latest bleeding edge version."
def RunStep(self):
self.GitCheckout("master")
# Store latest version and revision.
self.ReadAndPersistVersion()
self["latest_version"] = self.ArrayToVersion("")
self["latest"] = self.GitLog(n=1, format="%H")
print "Bleeding edge version: %s" % self["latest_version"]
# This step is pure paranoia. It forbids the script to continue if the last
# commit changed version.cc. Just in case the other bailout has a bug, this
# prevents the script from continuously committing version changes.
class LastChangeBailout(Step):
MESSAGE = "Stop script if the last change modified the version."
def RunStep(self):
if VERSION_FILE in self.GitChangedFiles(self["latest"]):
print "Stop due to recent version change."
return True
# TODO(machenbach): Implement this for git.
class FetchLKGR(Step):
MESSAGE = "Fetching V8 LKGR."
def RunStep(self):
lkgr_url = "https://v8-status.appspot.com/lkgr"
self["lkgr_svn"] = self.ReadURL(lkgr_url, wait_plan=[5])
# TODO(machenbach): Implement this for git. With a git lkgr we could simply
# checkout that revision. With svn, we have to search backwards until that
# revision is found.
class GetLKGRVersion(Step):
MESSAGE = "Get bleeding edge lkgr version."
def RunStep(self):
self.GitCheckout("master")
# If the commit was made from svn, there is a mapping entry in the commit
# message.
self["lkgr"] = self.GitLog(
grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" % self["lkgr_svn"],
format="%H")
# FIXME(machenbach): http://crbug.com/391712 can lead to svn lkgrs on the
# trunk branch (rarely).
if not self["lkgr"]: # pragma: no cover
self.Die("No git hash found for svn lkgr.")
self.GitCreateBranch(VERSION_BRANCH, self["lkgr"])
self.ReadAndPersistVersion("lkgr_")
self["lkgr_version"] = self.Arra | yToVersion("lkgr_")
print "LKGR version: %s" % self["lkgr_version"]
# Ensure a clean version branch.
self.GitCheckout("master")
self.DeleteBranch(VERSION_BRANCH)
class LKGRVersionUpToDateBailout(Step):
MESSAGE = "Stop script if the lkgr has a renewed version."
def RunStep(self):
# If a version-change commit becomes the lkgr, don't bump up the version
# again.
    if VERSION_FILE in self.GitChangedFiles(self["lkgr"]):
print "Stop because the lkgr is a version change itself."
return True
# Don't bump up the version if it got updated already after the lkgr.
if SortingKey(self["lkgr_version"]) < SortingKey(self["latest_version"]):
print("Stop because the latest version already changed since the lkgr "
"version.")
return True
class GetTrunkVersion(Step):
MESSAGE = "Get latest trunk version."
def RunStep(self):
self.GitCheckout("candidates")
self.GitPull()
self.ReadAndPersistVersion("trunk_")
self["trunk_version"] = self.ArrayToVersion("trunk_")
print "Trunk version: %s" % self["trunk_version"]
class CalculateVersion(Step):
MESSAGE = "Calculate the new version."
def RunStep(self):
if self["lkgr_build"] == "9999": # pragma: no cover
# If version control on bleeding edge was switched off, just use the last
# trunk version.
self["lkgr_version"] = self["trunk_version"]
# The new version needs to be greater than the max on bleeding edge and
# trunk.
max_version = max(self["trunk_version"],
self["lkgr_version"],
key=SortingKey)
# Strip off possible leading zeros.
self["new_major"], self["new_minor"], self["new_build"], _ = (
map(str, map(int, max_version.split("."))))
self["new_build"] = str(int(self["new_build"]) + 1)
self["new_patch"] = "0"
self["new_version"] = ("%s.%s.%s.0" %
(self["new_major"], self["new_minor"], self["new_build"]))
print "New version is %s" % self["new_version"]
if self._options.dry_run: # pragma: no cover
print "Dry run, skipping version change."
return True
class CheckTreeStatus(Step):
MESSAGE = "Checking v8 tree status message."
def RunStep(self):
status_url = "https://v8-status.appspot.com/current?format=json"
status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
message = json.loads(status_json)["message"]
if re.search(r"maintenance|no commits", message, flags=re.I):
print "Skip version change by tree status: \"%s\"" % message
return True
class ChangeVersion(Step):
MESSAGE = "Bump up the version."
def RunStep(self):
self.GitCreateBranch(VERSION_BRANCH, "master")
self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
try:
msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
self.GitCommit("%s\n\nTBR=%s" % (msg, self._options.author),
author=self._options.author)
self.GitUpload(author=self._options.author,
force=self._options.force_upload,
bypass_hooks=True)
self.GitCLLand()
print "Successfully changed the version."
finally:
# Clean up.
self.GitCheckout("master")
self.DeleteBranch(VERSION_BRANCH)
class BumpUpVersion(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("--dry_run", help="Don't commit the new version.",
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
if not options.dry_run and not options.author:
print "Specify your chromium.org email with -a"
return False
options.wait_for_lgtm = False
options.force_readline_defaults = True
options.force_upload = True
return True
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-bump-up-version-tempfile",
"PATCH_FILE": "/tmp/v8-bump-up-version-tempfile-patch-file",
}
def _Steps(self):
return [
Preparation,
GetCurrentBleedingEdgeVersion,
LastChangeBailout,
FetchLKGR,
GetLKGRVersion,
LKGRVersionUpToDateBailout,
GetTrunkVersion,
CalculateVersion,
CheckTreeStatus,
ChangeVersion,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(BumpUpVersion().Run())
|
openatv/enigma2 | lib/python/timer.py | Python | gpl-2.0 | 10,631 | 0.027091 | from bisect import insort
from time import time, localtime, mktime
from enigma import eTimer, eActionMap
import datetime
class TimerEntry:
StateWaiting = 0
StatePrepared = 1
StateRunning = 2
StateEnded = 3
StateFailed = 4
def __init__(self, begin, end):
self.begin = begin
self.prepare_time = 20
self.end = end
self.state = 0
self.findRunningEvent = True
self.findNextEvent = False
        self.resetRepeated()
#begindate = localtime(self.begin)
        #newdate = datetime.datetime(begindate.tm_year, begindate.tm_mon, begindate.tm_mday, 0, 0, 0);
self.repeatedbegindate = begin
self.backoff = 0
self.disabled = False
self.failed = False
def resetState(self):
self.state = self.StateWaiting
self.cancelled = False
self.first_try_prepare = 0
self.findRunningEvent = True
self.findNextEvent = False
self.timeChanged()
def resetRepeated(self):
self.repeated = int(0)
def setRepeated(self, day):
self.repeated |= (2 ** day)
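        # Illustrative (added comment): days are stored as bit flags indexed
        # by tm_wday (0 = Monday ... 6 = Sunday), so
        #   timer.setRepeated(0); timer.setRepeated(4)
        # leaves self.repeated == 0b10001, i.e. repeat on days 0 and 4.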
def isRunning(self):
return self.state == self.StateRunning
def addOneDay(self, timedatestruct):
oldHour = timedatestruct.tm_hour
newdate = (datetime.datetime(timedatestruct.tm_year, timedatestruct.tm_mon, timedatestruct.tm_mday, timedatestruct.tm_hour, timedatestruct.tm_min, timedatestruct.tm_sec) + datetime.timedelta(days=1)).timetuple()
if localtime(mktime(newdate)).tm_hour != oldHour:
return (datetime.datetime(timedatestruct.tm_year, timedatestruct.tm_mon, timedatestruct.tm_mday, timedatestruct.tm_hour, timedatestruct.tm_min, timedatestruct.tm_sec) + datetime.timedelta(days=2)).timetuple()
return newdate
def isFindRunningEvent(self):
return self.findRunningEvent
def isFindNextEvent(self):
return self.findNextEvent
# update self.begin and self.end according to the self.repeated-flags
def processRepeated(self, findRunningEvent=True, findNextEvent=False):
if self.repeated != 0:
now = int(time()) + 1
if findNextEvent:
now = self.end + 120
self.findRunningEvent = findRunningEvent
self.findNextEvent = findNextEvent
#to avoid problems with daylight saving, we need to calculate with localtime, in struct_time representation
localrepeatedbegindate = localtime(self.repeatedbegindate)
localbegin = localtime(self.begin)
localend = localtime(self.end)
localnow = localtime(now)
day = []
flags = self.repeated
for x in (0, 1, 2, 3, 4, 5, 6):
if flags & 1 == 1:
day.append(0)
else:
day.append(1)
flags >>= 1
# if day is NOT in the list of repeated days
# OR if the day IS in the list of the repeated days, check, if event is currently running... then if findRunningEvent is false, go to the next event
while ((day[localbegin.tm_wday] != 0) or (mktime(localrepeatedbegindate) > mktime(localbegin)) or
(day[localbegin.tm_wday] == 0 and (findRunningEvent and localend < localnow) or ((not findRunningEvent) and localbegin < localnow))):
localbegin = self.addOneDay(localbegin)
localend = self.addOneDay(localend)
#we now have a struct_time representation of begin and end in localtime, but we have to calculate back to (gmt) seconds since epoch
self.begin = int(mktime(localbegin))
self.end = int(mktime(localend))
if self.begin == self.end:
self.end += 1
self.timeChanged()
def __lt__(self, o):
return self.getNextActivation() < o.getNextActivation()
# must be overridden
def activate(self):
pass
# can be overridden
def timeChanged(self):
pass
# check if a timer entry must be skipped
def shouldSkip(self):
if self.disabled:
if self.end <= time() and not "PowerTimerEntry" in repr(self):
self.disabled = False
return True
if "PowerTimerEntry" in repr(self):
if (self.timerType == 3 or self.timerType == 4) and self.autosleeprepeat != 'once':
return False
elif self.begin >= time() and (self.timerType == 3 or self.timerType == 4) and self.autosleeprepeat == 'once':
return False
elif (self.timerType == 3 or self.timerType == 4) and self.autosleeprepeat == 'once' and self.state != TimerEntry.StatePrepared:
return True
else:
return self.end <= time() and self.state == TimerEntry.StateWaiting and self.timerType != 3 and self.timerType != 4
else:
return self.end <= time() and (self.state == TimerEntry.StateWaiting or self.state == TimerEntry.StateFailed)
def abort(self):
self.end = time()
# in case timer has not yet started, but gets aborted (so it's preparing),
# set begin to now.
if self.begin > self.end:
self.begin = self.end
self.cancelled = True
# must be overridden!
def getNextActivation(self):
pass
def fail(self):
        self.failed = True
def disable(self):
self.disabled = True
def enable(self):
self.disabled = False
class Timer:
# the time between "polls". We do this because
# we want to account for time jumps etc.
# of course if they occur <100s before starting,
# it's not good. thus, you have to repoll when
# you change the time.
#
# this is just in case. We don't want the timer
# hanging. we use this "edge-triggered-polling-scheme"
# anyway, so why don't make it a bit more fool-proof?
MaxWaitTime = 100
def __init__(self):
self.timer_list = []
self.processed_timers = []
self.timer = eTimer()
self.timer.callback.append(self.calcNextActivation)
self.lastActivation = time()
self.calcNextActivation()
self.on_state_change = []
def stateChanged(self, entry):
for f in self.on_state_change:
f(entry)
def cleanup(self):
self.processed_timers = [entry for entry in self.processed_timers if entry.disabled]
def cleanupDisabled(self):
disabled_timers = [entry for entry in self.processed_timers if entry.disabled]
for timer in disabled_timers:
timer.shouldSkip()
def cleanupDaily(self, days):
limit = time() - (days * 3600 * 24)
self.processed_timers = [entry for entry in self.processed_timers if (entry.disabled and entry.repeated) or (entry.end and (entry.end > limit))]
def addTimerEntry(self, entry, noRecalc=0):
entry.processRepeated()
# when the timer has not yet started, and is already passed,
# don't go trough waiting/running/end-states, but sort it
# right into the processedTimers.
if entry.shouldSkip() or entry.state == TimerEntry.StateEnded or (entry.state == TimerEntry.StateWaiting and entry.disabled):
insort(self.processed_timers, entry)
entry.state = TimerEntry.StateEnded
else:
insort(self.timer_list, entry)
if not noRecalc:
self.calcNextActivation()
# small piece of example code to understand how to use record simulation
# if NavigationInstance.instance:
# lst = [ ]
# cnt = 0
# for timer in self.timer_list:
# print("timer %s" % str(cnt))
# cnt += 1
# if timer.state == 0: #waiting
# lst.append(NavigationInstance.instance.recordService(timer.service_ref))
# else:
# print("STATE: %s" % str(timer.state))
#
# for rec in lst:
# if rec.start(True): #simulate
# print("FAILED!!!!!!!!!!!!")
# else:
# print("OK!!!!!!!!!!!!!!")
# NavigationInstance.instance.stopRecordService(rec)
# else:
# print("no NAV")
def setNextActivation(self, now, when):
delay = int((when - now) * 1000)
self.timer.start(delay, 1)
self.next = when
def calcNextActivation(self):
now = time()
if self.lastActivation > now:
print("[timer] timewarp - re-evaluating all processed timers.")
tl = self.processed_timers
self.processed_timers = []
for x in tl:
# simulate a "waiting" state to give them a chance to re-occure
x.resetState()
self.addTimerEntry(x, noRecalc=1)
self.processActivation()
self.lastActivation = now
min = int(now) + self.MaxWaitTime
self.timer_list and self.timer_list.sort() # resort/refresh list, try to fix hanging timers
# calculate next activation point
timer_list = [t for t in self.timer_list if not t.disabled]
if timer_list:
w = timer_list[0].getNextActivation()
if w < min:
min = w
if int(now) < 1072224000 and min > now + 5:
# system time has not yet been set (before 01.01.2004), keep a short poll interval
min = now + 5
self.setNextActivation(now, min)
def timeChanged(self, timer):
timer.timeChanged()
if tim |
tynn/numpy | numpy/core/numeric.py | Python | bsd-3-clause | 87,411 | 0.000114 | from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import itertools
import operator
import sys
import warnings
import numbers
import numpy as np
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, broadcast, can_cast, compare_chararrays,
concatenate, copyto, count_nonzero, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
inner, int_asbuffer, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
if sys.version_info[0] < 3:
from .multiarray import newbuffer, getbuffer
from . import umath
from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT,
ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT,
ERR_LOG, ERR_DEFAULT, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._internal import TooHardError, AxisError
bitwise_not = invert
ufunc = type(sin)
newaxis = None
if sys.version_info[0] >= 3:
import pickle
basestring = str
import builtins
else:
import cPickle as pickle
import __builtin__ as builtins
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
"np.core.numeric.loads is deprecated, use pickle.loads instead",
DeprecationWarning, stacklevel=2)
return pickle.loads(*args, **kwargs)
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
    'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
    'result_type', 'asarray', 'asanyarray', 'ascontiguousarray',
    'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr',
'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall',
'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
def zeros_like(a, dtype=None, order='K', subok=True):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
array([ 0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
    # needed instead of a 0 to get same result as zeros for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
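# Illustrative of the string-dtype note above (added comment, not in the
# original source):
#   >>> np.zeros_like(np.array(['abc']))
#   array([''], dtype='<U3')          # on Python 3
# Broadcasting the Python scalar 0 instead would have produced '0'.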
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: C
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
zeros, ones_like
Examples
--------
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[ 1.],
[ 1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[ 1., 1.],
[ 1., 1.]])
"""
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
def ones_like(a, dtype=None, order='K', subok=True):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=float)
>>> y
array([ 0., 1., 2.])
>>> np.ones_like(y)
|
VipulSarin/citenet | scopus_module/insert_papers.py | Python | gpl-3.0 | 2,317 | 0.022874 | from py2neo import authenticate, Graph, Node, Relationship
from scopus.scopus_api import ScopusAbstract
from paper_abstract import *
import pdb
import argparse
import json
import os
import sys
reload (sys)
sys.setdefaultencoding('UTF-8')
def init_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("filename",help="Name of file with papers to be inserted")
parser.add_argument("-s","--server",help="IP address of neo4j server,default=localhost")
parser.add_argument("-p","--port",help="Port number on which neo4j listens,default=7474")
args=parser.parse_args()
filename = args.filename
server=args.server if args.server else "localhost"
port=args.port if args.port else "7474"
return (filename,server,port)
def connect_parent_node(curr_paper,parent_id):
if parent_id != None:
query = 'MATCH(parent:Paper{scopus_id:"'+parent_id+'"}) MATCH(curr:Paper{scopus_id:"'+curr_paper.scopus_id+'"}) MERGE (parent)-[c:CITES]->(curr)'
graph.run(query)
def perform_dfs(curr_paper,count):
if curr_paper.references != None:
for ref_id in curr_paper.references:
dfs(ref_id,curr_paper.scopus_id,count+1)
def dfs(curr_paper_id,parent_id,count):
if count > 0:
return
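    # Note (added comment): with this hard-coded threshold the traversal
    # stops at the seed papers themselves -- perform_dfs() recurses with
    # count + 1, which returns here immediately. Raise the limit to also
    # insert cited references.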
curr_paper = PaperAbstract(curr_paper_id)
if curr_paper.no_abstract == False and curr_paper.title != None:
curr_paper.bind_remote(graph)
connect_parent_node(curr_paper,parent_id)
        perform_dfs(curr_paper,count)
(filename,server,port) = init_arg_parser()
filename=filename.replace("\n","")
authenticate(server+":"+port, "neo4j", "3800")
graph = Graph()
with open(filename) as f:
paper_ids = f.readlines()
paper_ids = [x.strip('\n') for x in paper_ids]
completed = 0
data={}
data['total']=len(paper_ids)
data['completed']=completed
output_file=filename.replace(".txt","")
with open(output_file+'_output.txt','w+') as f:
f.write(json.dumps(data))
try:
for paper_id in paper_ids:
try:
dfs(paper_id,None,0)
except Exception,e:
pass
completed+=1
data['completed']=completed
data['curr_paper']=paper_id
with open(output_file+"_output.txt",'w+') as f:
f.write(json.dumps(data))
except Exception,e:
os.remove(output_file+'_output.txt')
|
ossanna16/django-rest-framework | tests/test_schemas.py | Python | bsd-2-clause | 28,125 | 0.002133 | import unittest
import pytest
from django.conf.urls import include, url
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.test import TestCase, override_settings
from rest_framework import filters, pagination, permissions, serializers
from rest_framework.compat import coreapi, coreschema
from rest_framework.decorators import (
api_view, detail_route, list_route, schema
)
from rest_framework.request import Request
from rest_framework.routers import DefaultRouter
from rest_framework.schemas import (
AutoSchema, ManualSchema, SchemaGenerator, get_schema_view
)
from rest_framework.schemas.generators import EndpointEnumerator
from rest_framework.test import APIClient, APIRequestFactory
from rest_framework.utils import formatting
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
factory = APIRequestFactory()
class MockUser(object):
def is_authenticated(self):
return True
class ExamplePagination(pagination.PageNumberPagination):
page_size = 100
page_size_query_param = 'page_size'
class EmptySerializer(serializers.Serializer):
pass
class ExampleSerializer(serializers.Serializer):
a = serializers.CharField(required=True, help_text='A field description')
b = serializers.CharField(required=False)
read_only = serializers.CharField(read_only=True)
hidden = serializers.HiddenField(default='hello')
class AnotherSerializerWithListFields(serializers.Serializer):
a = serializers.ListField(child=serializers.IntegerField())
b = serializers.ListSerializer(child=serializers.CharField())
class AnotherSerializer(serializers.Serializer):
c = serializers.CharField(required=True)
d = serializers.CharField(required=False)
class ExampleViewSet(ModelViewSet):
pagination_class = ExamplePagination
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
filter_backends = [filters.OrderingFilter]
serializer_class = ExampleSerializer
@detail_route(methods=['post'], serializer_class=AnotherSerializer)
def custom_action(self, request, pk):
"""
A description of custom action.
"""
        return super(ExampleViewSet, self).retrieve(request)
    @detail_route(methods=['post'], serializer_class=AnotherSerializerWithListFields)
def custom_action_with_list_fields(self, request, pk):
"""
A custom action using both list field and list serializer in the serializer.
"""
        return super(ExampleViewSet, self).retrieve(request)
    @list_route()
def custom_list_action(self, request):
        return super(ExampleViewSet, self).list(request)
@list_route(methods=['post', 'get'], serializer_class=EmptySerializer)
def custom_list_action_multiple_methods(self, request):
        return super(ExampleViewSet, self).list(request)
def get_serializer(self, *args, **kwargs):
assert self.request
assert self.action
return super(ExampleViewSet, self).get_serializer(*args, **kwargs)
if coreapi:
schema_view = get_schema_view(title='Example API')
else:
def schema_view(request):
pass
router = DefaultRouter()
router.register('example', ExampleViewSet, base_name='example')
urlpatterns = [
url(r'^$', schema_view),
url(r'^', include(router.urls))
]
@unittest.skipUnless(coreapi, 'coreapi is not installed')
@override_settings(ROOT_URLCONF='tests.test_schemas')
class TestRouterGeneratedSchema(TestCase):
def test_anonymous_request(self):
client = APIClient()
response = client.get('/', HTTP_ACCEPT='application/coreapi+json')
assert response.status_code == 200
expected = coreapi.Document(
url='http://testserver/',
title='Example API',
content={
'example': {
'list': coreapi.Link(
url='/example/',
action='get',
fields=[
coreapi.Field('page', required=False, location='query', schema=coreschema.Integer(title='Page', description='A page number within the paginated result set.')),
coreapi.Field('page_size', required=False, location='query', schema=coreschema.Integer(title='Page size', description='Number of results to return per page.')),
coreapi.Field('ordering', required=False, location='query', schema=coreschema.String(title='Ordering', description='Which field to use when ordering the results.'))
]
),
'custom_list_action': coreapi.Link(
url='/example/custom_list_action/',
action='get'
),
'custom_list_action_multiple_methods': {
'read': coreapi.Link(
url='/example/custom_list_action_multiple_methods/',
action='get'
)
},
'read': coreapi.Link(
url='/example/{id}/',
action='get',
fields=[
coreapi.Field('id', required=True, location='path', schema=coreschema.String()),
coreapi.Field('ordering', required=False, location='query', schema=coreschema.String(title='Ordering', description='Which field to use when ordering the results.'))
]
)
}
}
)
assert response.data == expected
def test_authenticated_request(self):
client = APIClient()
client.force_authenticate(MockUser())
response = client.get('/', HTTP_ACCEPT='application/coreapi+json')
assert response.status_code == 200
expected = coreapi.Document(
url='http://testserver/',
title='Example API',
content={
'example': {
'list': coreapi.Link(
url='/example/',
action='get',
fields=[
coreapi.Field('page', required=False, location='query', schema=coreschema.Integer(title='Page', description='A page number within the paginated result set.')),
coreapi.Field('page_size', required=False, location='query', schema=coreschema.Integer(title='Page size', description='Number of results to return per page.')),
coreapi.Field('ordering', required=False, location='query', schema=coreschema.String(title='Ordering', description='Which field to use when ordering the results.'))
]
),
'create': coreapi.Link(
url='/example/',
action='post',
encoding='application/json',
fields=[
coreapi.Field('a', required=True, location='form', schema=coreschema.String(title='A', description='A field description')),
coreapi.Field('b', required=False, location='form', schema=coreschema.String(title='B'))
]
),
'read': coreapi.Link(
url='/example/{id}/',
action='get',
fields=[
coreapi.Field('id', required=True, location='path', schema=coreschema.String()),
coreapi.Field('ordering', required=False, location='query', schema=coreschema.String(title='Ordering', description='Which field to use when ordering the results.'))
]
),
'custom_action': coreapi.Link(
url='/example/{id}/custom_action/',
action='post',
encoding='application/json',
description='A description of custom acti |
rspavel/spack | lib/spack/spack/modules/common.py | Python | lgpl-2.1 | 30,792 | 0 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Here we consolidate the logic for creating an abstract description
of the information that module systems need.
This information maps **a single spec** to:
* a unique module filename
* the module file content
and is divided among four classes:
* a configuration class that provides a convenient interface to query
details about the configuration for the spec under consideration.
* a layout class that provides the information associated with module
file names and directories
* a context class that provides the dictionary used by the template engine
to generate the module file
* a writer that collects and uses the information above to either write
or remove the module file
Each of the four classes needs to be sub-classed when implementing a new
module type.
"""
import collections
import copy
import datetime
import inspect
import os.path
import re
import llnl.util.filesystem
import llnl.util.tty as tty
import spack.build_environment as build_environment
import spack.config
import spack.error
import spack.paths
import spack.schema.environment
import spack.projections as proj
import spack.store
import spack.tengine as tengine
import spack.util.environment
import spack.util.file_permissions as fp
import spack.util.path
import spack.util.spack_yaml as syaml
#: config section for this file
def configuration():
return spack.config.get('modules', {})
#: Valid tokens for naming scheme and env variable names
_valid_tokens = (
'name',
'version',
'compiler',
'compiler.name',
'compiler.version',
'architecture',
# tokens from old-style format strings
'package',
'compilername',
'compilerver',
)
def _check_tokens_are_valid(format_string, message):
"""Checks that the tokens used in the format string are valid in
the context of module file and environment variable naming.
Args:
format_string (str): string containing the format to be checked. This
is supposed to be a 'template' for ``Spec.format``
message (str): first sentence of the error message in case invalid
tokens are found
"""
named_tokens = re.findall(r'{(\w*)}', format_string)
invalid_tokens = [x for x in named_tokens
if x.lower() not in _valid_tokens]
if invalid_tokens:
msg = message
msg += ' [{0}]. '.format(', '.join(invalid_tokens))
msg += 'Did you check your "modules.yaml" configuration?'
raise RuntimeError(msg)
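# Editor's illustration (not part of the original module): the token names
# below come from _valid_tokens above; '{hash}' is deliberately invalid.
#
#     _check_tokens_are_valid('{name}-{version}', 'Invalid tokens found')
#     # returns silently -- both tokens are valid
#     _check_tokens_are_valid('{name}-{hash}', 'Invalid tokens found')
#     # raises RuntimeError mentioning [hash]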
def update_dictionary_extending_lists(target, update):
"""Updates a dictionary, but extends lists instead of overriding them.
Args:
target: dictionary to be updated
update: update to be applied
"""
for key in update:
value = target.get(key, None)
if isinstance(value, list):
target[key].extend(update[key])
        elif isinstance(value, dict):
update_dictionary_extending_lists(target[key], update[key])
else:
            target[key] = update[key]
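# Editor's sketch of the documented behavior, with made-up values:
#
#     target = {'flags': ['-O2'], 'rank': 1}
#     update_dictionary_extending_lists(target, {'flags': ['-g'], 'rank': 2})
#     # target == {'flags': ['-O2', '-g'], 'rank': 2} -- the list is
#     # extended, while the scalar value is overridden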
def dependencies(spec, request='all'):
"""Returns the list of dependent specs for a given spec, according to the
request passed as parameter.
Args:
spec: spec to be analyzed
request: either 'none', 'direct' or 'all'
Returns:
list of dependencies
The return list will be empty if request is 'none', will contain
the direct dependencies if request is 'direct', or the entire DAG
if request is 'all'.
"""
if request not in ('none', 'direct', 'all'):
message = "Wrong value for argument 'request' : "
message += "should be one of ('none', 'direct', 'all')"
        raise ValueError(message + " [current value is '%s']" % request)
if request == 'none':
return []
if request == 'direct':
return spec.dependencies(deptype=('link', 'run'))
# FIXME : during module file creation nodes seem to be visited multiple
# FIXME : times even if cover='nodes' is given. This work around permits
# FIXME : to get a unique list of spec anyhow. Do we miss a merge
# FIXME : step among nodes that refer to the same package?
seen = set()
seen_add = seen.add
deps = sorted(
spec.traverse(order='post',
cover='nodes',
deptype=('link', 'run'),
root=False),
reverse=True)
return [d for d in deps if not (d in seen or seen_add(d))]
def merge_config_rules(configuration, spec):
"""Parses the module specific part of a configuration and returns a
dictionary containing the actions to be performed on the spec passed as
an argument.
Args:
configuration: module specific configuration (e.g. entries under
the top-level 'tcl' key)
spec: spec for which we need to generate a module file
Returns:
dict: actions to be taken on the spec passed as an argument
"""
# Get the top-level configuration for the module type we are using
module_specific_configuration = copy.deepcopy(configuration)
# Construct a dictionary with the actions we need to perform on the spec
# passed as a parameter
# The keyword 'all' is always evaluated first, all the others are
# evaluated in order of appearance in the module file
spec_configuration = module_specific_configuration.pop('all', {})
for constraint, action in module_specific_configuration.items():
override = False
if constraint.endswith(':'):
constraint = constraint.strip(':')
override = True
if spec.satisfies(constraint, strict=True):
if override:
spec_configuration = {}
update_dictionary_extending_lists(spec_configuration, action)
# Transform keywords for dependencies or prerequisites into a list of spec
# Which modulefiles we want to autoload
autoload_strategy = spec_configuration.get('autoload', 'none')
spec_configuration['autoload'] = dependencies(spec, autoload_strategy)
# Which instead we want to mark as prerequisites
prerequisite_strategy = spec_configuration.get('prerequisites', 'none')
spec_configuration['prerequisites'] = dependencies(
spec, prerequisite_strategy)
# Attach options that are spec-independent to the spec-specific
# configuration
# Hash length in module files
hash_length = module_specific_configuration.get('hash_length', 7)
spec_configuration['hash_length'] = hash_length
verbose = module_specific_configuration.get('verbose', False)
spec_configuration['verbose'] = verbose
return spec_configuration
def root_path(name):
"""Returns the root folder for module file installation.
Args:
name: name of the module system to be used (e.g. 'tcl')
Returns:
root folder for module file installation
"""
# Root folders where the various module files should be written
roots = spack.config.get('config:module_roots', {})
path = roots.get(name, os.path.join(spack.paths.share_path, name))
return spack.util.path.canonicalize_path(path)
def generate_module_index(root, modules, overwrite=False):
index_path = os.path.join(root, 'module-index.yaml')
if overwrite or not os.path.exists(index_path):
entries = syaml.syaml_dict()
else:
with open(index_path) as index_file:
yaml_content = syaml.load(index_file)
entries = yaml_content['module_index']
for m in modules:
entry = {
'path': m.layout.filename,
'use_name': m.layout.use_name
}
entries[m.spec.dag_hash()] = entry
index = {'module_index': entries}
llnl.util.filesystem.mkdirp(root)
with open(index_path, 'w') as index_file:
syaml.dump(index, default_flow_style=False, stream=index_file)
def _generate_upstream_module_index():
module_indices = read_module_indices()
    return UpstreamModuleIndex(spack.store.db, module_indices)
chemelnucfin/tensorflow | tensorflow/python/keras/layers/lstm_v2_test.py | Python | apache-2.0 | 37,712 | 0.002864 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
_config = config_pb2.ConfigProto(graph_options=_graph_options)
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMV2Test(keras_parameterized.TestCase):
@parameterized.named_parameters(
('non_tan_activation', 'relu', 'sigmoid', 0, False, True),
('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True),
('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True),
      ('unroll', 'tanh', 'sigmoid', 0, True, True),
('not_use_bias', 'tanh', 'sigmoid', 0, False, False),
)
def test_could_use_defun_backend(self, activation, recurrent_activation,
recurrent_dropout, unroll, use_bias):
layer = rnn.LSTM(
1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
        unroll=unroll,
use_bias=use_bias)
self.assertFalse(layer.could_use_cudnn)
def test_static_shape_inference_LSTM(self):
# Github issue: 15165
timesteps = 3
embedding_dim = 4
units = 2
model = keras.models.Sequential()
inputs = keras.layers.Dense(
embedding_dim, input_shape=(timesteps, embedding_dim))
model.add(inputs)
layer = rnn.LSTM(units, return_sequences=True)
model.add(layer)
outputs = model.layers[-1].output
self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = rnn.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_stacking_LSTM(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = rnn.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_specify_initial_state_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = rnn.LSTM(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertTrue(
any(initial_state[0] is t
for t in layer._inbound_nodes[0].input_tensors))
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_specify_initial_state_non_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with non-Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [
keras.backend.random_normal_variable((num_samples, units), 0, 1)
for _ in range(num_states)
]
layer = rnn.LSTM(units)
output = layer(inputs, initial_state=initial_state)
model = keras.models.Model(inputs, output)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.01))
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
layer = rnn.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
initial_weight_count = len(layer.weights)
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
self.assertEqual(initial_weight_count, len(layer.weights))
# Variables in "states" shouldn't show up in .weights
layer.states = nest.map_structure(variables.Variable, values)
layer.reset_states()
self.assertEqual(initial_weight_count, len(layer.weights))
def test_sp |
mozilla/elasticutils | elasticutils/tests/test_mlt.py | Python | bsd-3-clause | 3,116 | 0 | from nose.tools import eq_
from elasticutils import MLT
from elasticutils.tests import ESTestCase
class MoreLikeThisTest(ESTestCase):
data = [
{'id': 1, 'foo': 'bar', 'tag': 'awesome'},
        {'id': 2, 'foo': 'bar', 'tag': 'boring'},
{'id': 3, 'foo': 'bar', 'tag': 'awesome'},
{'id': 4, 'foo': 'bar', 'tag': 'boring'},
{'id': 5, 'foo': 'bar', 'tag': 'elite'},
{'id': 6, 'foo': 'notbar', 'tag': 'gross'},
{'id': 7, 'foo': 'notbar', 'tag': 'awesome'},
]
def test_bad_mlt(self):
"""Tests S or index and doc_type is specified."""
self.assertRaises(ValueError, lambda: MLT(1))
self.assertRaises(ValueError, lambda: MLT(1, index='foo'))
self.assertRaises(ValueError, lambda: MLT(1, doctype='foo'))
def test_mlt_on_foo(self):
"""Verify MLT with the foo field."""
# We need to pass min_term_freq and min_doc_freq, because the terms
# we are using are only once in each document.
mlt = MLT(1, self.get_s(), ['foo'], min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 4)
def test_mlt_on_foo_no_s(self):
"""Verify MLT with the foo field."""
index = self.get_s().get_indexes()[0]
doc_type = self.get_s().get_doctypes()[0]
es = self.get_s().get_es()
mlt = MLT(1, mlt_fields=['foo'], index=index, doctype=doc_type,
es=es, min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 4)
def test_mlt_on_tag(self):
"""Verify MLT with the tag field."""
# We need to pass min_term_freq and min_doc_freq, because the terms
# we are using are only once in each document.
mlt = MLT(1, self.get_s(), ['tag'], min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 2)
def test_mlt_on_two_fields(self):
"""Verify MLT on tag and foo fields."""
mlt = MLT(1, self.get_s(), ['tag', 'foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 5)
def test_mlt_deprecated_fields(self):
with self.assertRaises(DeprecationWarning):
MLT(1, self.get_s(), fields=['tag', 'foo'])
def test_mlt_iter(self):
mlt = MLT(1, self.get_s(), ['tag', 'foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(list(mlt)), 5)
def test_mlt_on_foo_with_filter(self):
"""Verify MLT with the foo field while filtering on tag."""
# We need to pass min_term_freq and min_doc_freq, because the terms
# we are using are only once in each document.
mlt = MLT(1, self.get_s().filter(tag='boring'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 2)
mlt = MLT(1, self.get_s().filter(tag='elite'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 1)
mlt = MLT(1, self.get_s().filter(tag='awesome'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 1)
mlt = MLT(1, self.get_s().filter(tag='gross'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 0)
|
gperciva/artifastring | research/mode-detect/plot-harmonics.py | Python | gpl-3.0 | 3,185 | 0.009733 | #!/usr/bin/env python
import os.path
import sys
import glob
import numpy
import pylab
import expected_frequencies
import defs
import stft
import partials
try:
dirname = sys.argv[1]
except:
print "Need dirname, and optional maximum frequency"
try:
min_freq = float(sys.argv[2])
max_freq = float(sys.argv[3])
except:
max_freq = 0
max_freq = -1
HOPSIZE=defs.HOPSIZE
#SAMPLE_RATE=48000
#MAX_SECONDS = 3.0
#MAX_SECONDS = 0.5
MAX_SECONDS = 15
AXIS_Y_TOP = 0
AXIS_Y_BOTTOM = -160
def write_plot(base_filename):
filenames = glob.glob(os.path.join(
base_filename, "spectrum-*.txt"))
basename = base_filename.split('/')[-2]
wav_filename = os.path.split(
os.path.dirname(base_filename)
)[-1]
    base_freq = expected_frequencies.get_freq_from_filename(wav_filename)
filenames.sort()
Bs = numpy.loadtxt(os.path.join(base_filename, 'Bs.txt'))
    SAMPLE_RATE, base_freq, B, limit, below, above = Bs
limit = int(limit)
num_harms = None
for i, filename in enumerate(filenames):
seconds = i*HOPSIZE / float(SAMPLE_RATE)
if seconds > MAX_SECONDS:
print "Reached time cutoff of %.1f" % MAX_SECONDS
return
print i, filename
fft = numpy.loadtxt(filename)
harms = numpy.loadtxt(filename.replace("spectrum-", "harms-"))
#noise = numpy.loadtxt(os.path.join(
# base_filename, "noise-floor.txt"))
outfilename = filename.replace("spectrum-","").replace(".txt", ".png")
freqs_estimate_int = [ i*base_freq for
i in range(1,limit+1)]
freqs_estimate_B = [ partials.mode_B2freq(base_freq, i, B) for
i in range(1,limit+1)]
# DEBUG for g string only
for j, freq in enumerate(freqs_estimate_int):
if j == 0:
pylab.axvline(freq, color="y", label="ideal freq.")
else:
pylab.axvline(freq, color="y")
for j, freq in enumerate(freqs_estimate_B):
low = stft.bin2hertz( stft.hertz2bin(freq, SAMPLE_RATE)
- below, SAMPLE_RATE)
high = stft.bin2hertz( stft.hertz2bin(freq,
SAMPLE_RATE) + above, SAMPLE_RATE)
if j == 0:
pylab.axvspan(low, high, color="c", alpha=0.3,
label="search range")
else:
pylab.axvspan(low, high, color="c", alpha=0.3)
pylab.plot(fft[:,0], fft[:,1])
pylab.plot(harms[:,0], harms[:,1], 'ro', label="peaks")
#pylab.semilogy(noise[:,0], noise[:,1], 'g-')
if num_harms is None:
num_harms = len(harms[:,0])
#pylab.xlim([0, (num_harms+3)*base_freq])
if max_freq > 0:
pylab.xlim([min_freq, max_freq])
pylab.ylim([AXIS_Y_BOTTOM, AXIS_Y_TOP])
pylab.xlabel("Frequency [Hz]")
pylab.ylabel("Amplitude [dB]")
pylab.title("Evolution of harmonics: %s\n%.3fs seconds" % (
basename, seconds))
#pylab.legend(bbox_to_anchor=(1.05, 1), loc=2)
pylab.legend()
pylab.savefig(outfilename)
pylab.close()
#pylab.show()
write_plot(dirname)
|
rexzhang/rpress | rpress/views/rpadmin/settings.py | Python | gpl-3.0 | 1,428 | 0.002101 | #!/usr/bin/env python
# coding=utf-8
import flask
from flask import flash
from flask_login import login_required
from rpress.models import SiteSetting
from rpress.database import db
from rpress.runtimes.rpadmin.template import render_template, navbar
from rpress.runtimes.current_session import get_current_site, get_current_site_info
from rpress.forms import SettingsForm
app = flask.Blueprint('rpadmin_setting', __name__)
@app.route('/', methods=['GET', ])
@login_required
@navbar(level1='settings')
def list():
content = {
'site': get_current_site_info(),
}
return render_template('rpadmin/settings/list.html', content=content)
@app.route('/<string:key>/edit', methods=['GET', 'POST'])
@login_required
@navbar(level1='settings')
def edit(key):
site = get_current_site()
site_setting = SiteSetting.query.filter_by(site=site, key=key).order_by('created_time').first()
    if site_setting is None:
        site_setting = SiteSetting(
site_id=site.id,
key=key,
value=None,
)
form = SettingsForm(obj=site_setting)
if form.validate_on_submit():
form.populate_obj(site_setting)
db.session.add(site_setting)
db.session.commit()
flash("settings updated", "success")
else:
flash('settings edit error')
return render_template("rpadmin/settings/edit.html", form=form, site_setting=site_setting)
|
repodono/repodono.jobs | src/repodono/jobs/sanic.py | Python | gpl-2.0 | 5,298 | 0 | # -*- coding: utf-8 -*-
"""
Sanic implementation
"""
from sanic import response
from sanic import Blueprint
from random import getrandbits
class JobServer(object):
"""
    A basic job server that sets up a couple of routes.
    The target usage is to encapsulate and expose a service that takes
    some input arguments and outputs files into a temporary directory,
    which is to be provided by a specific job_manager implementation.
    This job server exposes that functionality through the routes it
    registers.
"""
def __init__(
self, job_manager,
base_url='/',
route_execute='execute',
route_poll='poll',
name=None, hook_start_stop=True):
"""
        Takes in a job_manager implementation, and provides some standard
        ways of interfacing with it.
"""
self.job_manager = job_manager
self.base_url = base_url
self.route_execute = route_execute
self.route_poll = route_poll
self.name = name or '%s_%d' % (__name__, id(self))
self.blueprint = Blueprint(self.name)
self.setup(hook_start_stop)
def start(self, sanic, loop):
self.job_manager.start()
self.mapping = {}
def stop(self, sanic, loop):
self.job_manager.stop()
self.mapping.clear()
def _response(self, obj, **kwargs):
return response.json(obj, **kwargs)
def _report(self, error_msg=None, status_msg=None, status=200):
# shorthand to generate the standardized responses.
response = {}
if error_msg:
response['error'] = error_msg
if status_msg:
response['status'] = status_msg
return self._response(response, status=status)
def _error(self, error_msg=None, status_msg=None, status=400):
return self._report(
error_msg=error_msg, status_msg=status_msg, status=status)
def _generate_job_id(self):
return '%032x' % getrandbits(128)
def setup(self, hook_start_stop=True):
blueprint = self.blueprint
if hook_start_stop:
blueprint.listener('before_server_start')(self.start)
blueprint.listener('after_server_stop')(self.stop)
# The callables must be unbounded functions, given how they must
# fully be associated with the given blueprint. The only way to
# do so is to provide the following functions within the closure
# formed by this method; instance methods cannot have attributes
# assigned to it (can only be on the unbound method on the class
# definition itself).
route_execute = '/%s' % self.route_execute
route_poll = '/%s/<job_id:string>' % self.route_poll
route_poll_result = '%s/<key:string>' % route_poll
@blueprint.route(route_execute, methods=['POST'])
async def execute(request):
"""
The post end point for starting a job
"""
try:
kwargs = self.job_manager.verify_run_kwargs(
**dict(request.form))
except ValueError as e:
return self._error(error_msg=str(e))
working_dir = self.job_manager.run(**kwargs)
job_id = self._generate_job_id()
self.mapping[job_id] = working_dir
return self._response(
{
'status': 'created',
'location': '/%s/%s' % (self.route_poll, job_id),
},
headers={
                'Location': '/%s/%s' % (self.route_poll, job_id),
},
            status=201,
)
@blueprint.route(route_poll)
async def poll(request, job_id):
if job_id not in self.mapping:
return self._error(error_msg='no such job_id', status=404)
# XXX whenever the API for dealing with the actual Popen
# objects (i.e. the actual polling) are done it should be
# used instead
working_dir = self.mapping[job_id]
process = self.job_manager.mapping[working_dir]
status = process.poll()
# TODO clean this up
if status is None:
return self._report(status_msg='running')
elif status != 0:
return self._error(
status_msg='failure',
error_msg='job execution terminated with an error',
)
else:
result = {'status': 'success'}
result['keys'] = self.job_manager.list_working_dir(
working_dir)
return self._response(result)
@blueprint.route(route_poll_result)
async def results(request, job_id, key):
if job_id not in self.mapping:
return self._error(error_msg='no such job_id', status=404)
working_dir = self.mapping[job_id]
target = self.job_manager.lookup_path(working_dir, key)
if target is None:
return self._error(
error_msg='no such key for job', status=404)
return await response.file(target)
def register(self, app):
app.blueprint(self.blueprint)
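# Editor's wiring sketch (hypothetical names; assumes a Sanic app and a
# concrete job_manager implementation called MyJobManager):
#
#     from sanic import Sanic
#     app = Sanic('jobs')
#     server = JobServer(MyJobManager(), name='jobs')
#     server.register(app)
#     # POST /execute starts a job; GET /poll/<job_id> reports its status;
#     # GET /poll/<job_id>/<key> streams a produced file.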
|
robocomp/learnbot | learnbot_dsl/functions/proprioceptive/base/near_to_target.py | Python | gpl-3.0 | 232 | 0.038793 | import math as m
def near_to_target(lbot, targetX, targetY, nearDist = 50):
x, y, alpha = lbot.getPose()
distToTarget = m.sqrt(m.pow(x-targetX, 2) + m.pow(y-targetY, 2))
    if distToTarget <= nearDist:
return True
return False
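# Editor's example: with the robot pose at (0, 0) and a target at (30, 40),
# the distance is exactly 50 ('lbot' stands for whatever learnbot robot
# object exposes getPose()):
#
#     near_to_target(lbot, 30, 40)      # True at the default nearDist of 50
#     near_to_target(lbot, 30, 40, 10)  # False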
|
armet/python-armet | armet/__init__.py | Python | mit | 363 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
from ._version import __version__, __version_info__ # noqa
from .decorators import route, resource, asynchronous
from .helpers import use
from .relationship import Relationship
__all__ = [
'route',
'resource',
'asynchronous',
    'use',
'Relationship'
]
|
google/citest | citest/base/base_test_case.py | Python | apache-2.0 | 8,010 | 0.004494 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements BaseTestCase class.
The BaseTestCase class is derived from unittest.TestCase, providing some
boilerplate and common routines on top of it. The class makes some changes
to accommodate for differences in writing integration tests from unit tests.
In particular, whereas unit tests are cheap, can be run in any order and have
no side effects affecting other tests, integration tests may [intentionally]
have lasting side effects that other tests depend on due their cost to run.
The BaseTestCase adds command line argument processing using the standard
argparse.ArgumentParser. It produces a "binding" dictionary of all the
commandline argument key/value pairs (except keys are in upper case) that
then become available to tests so that their configuration can be easily
tweaked.
The BaseTestCase adds logging support where it can set up standard logging
configuration so that it logs details to persistent file that can be consulted
later without cluttering the console, which only displays warnings and errors.
Additionally, each entry is timestamped.
"""
# Standard python modules.
import inspect
import logging
import traceback
import unittest
from .journal_logger import JournalLogger
class _TestProcessingStep(object):
SETUP = 0
EXECUTE = 1
TEARDOWN = 2
class BaseTestCase(unittest.TestCase):
"""Base class for tests.
  There isn't much here at the moment, but more may be added in the future.
This class is intended to be used in conjunction with the TestRunner,
though not strictly required. The test runner will call the
initArgumentParser method introduced by this base class to allow tests to
add custom bindings.
"""
@property
def logger(self):
"""Returns the logger for the fixture."""
return logging.getLogger(__name__)
def __init__(self, methodName='runTest'):
"""Construct instance.
Args:
methodName: [string] The method to run as defined by unittest.TestCase.
"""
# We're going to use the base class to invoke methods since
# it implements the protocol for calling setup/teardown and other
    # workflow. However the base class doesn't provide hooks or means for
# us to distinguish before/after method calls or catch exceptions
# and failures or join the logs the way we are.
#
    # To work around this, we'll wrap the fixture's method for this test with
    # our own wrapper. Since the base class looks the method up by name
    # and uses that name for reporting, we'll preserve the name, but overwrite the
# implementation with our wrapper, then have our wrapper delegate to the
# original intended method.
self.__method_name = methodName
self.__method = getattr(self, self.__method_name)
    # The journal reporting relation for the test execution.
self.__final_outcome_relation = None
self.__in_step = None
setattr(self, self.__method_name, self.__wrap_method)
super(BaseTestCase, self).__init__(methodName)
def __wrap_method(self):
# Wraps the calls to the actual test method so we have visibility.
#
    # When __call__ passes control to the base class, it will call this method
# after it has called setup. When we pass control back after this method,
# the base class will call teardown.
#
# Note this comment is not a string so that the TestRunner
# will not reflect on its comment.
self.__end_step_context(relation='VALID')
self.__in_step = _TestProcessingStep.SETUP
JournalLogger.begin_context('Execute')
self.__method()
JournalLogger.end_context(relation='VALID')
self.__in_step = _TestProcessingStep.TEARDOWN
self.__begin_step_context()
def __begin_step_context(self):
if (self.__in_step != _TestProcessingStep.SETUP
and self.__in_step != _TestProcessingStep.TEARDOWN):
raise ValueError('Unexpected step={0}'.format(self.__in_step))
JournalLogger.begin_context(
'setUp'
if self.__in_step == _TestProcessingStep.SETUP
else 'tearDown')
def __end_step_context(self, relation):
if self.__in_step is None:
return
JournalLogger.end_context(relation=relation)
def __trap_skip(self, delegate, test, reason):
self.__final_outcome_relation = 'VALID'
return delegate(test, reason)
def __trap_success(self, delegate, test):
self.__final_outcome_relation = 'VALID'
return delegate(test)
def __trap_failure(self, delegate, test, err):
self.__final_outcome_relation = 'INVALID'
return delegate(test, err)
def __trap_error(self, delegate, test, err):
self.__final_outcome_relation = 'ERROR'
error_details = '%s: %s' % (err[0], err[1])
trace = traceback.format_tb(err[2])
JournalLogger.journal_or_log_detail(
'Raised Exception', error_details, relation='ERROR',
levelno=logging.ERROR, format='pre', _logger=self.logger)
JournalLogger.journal_or_log_detail(
'Exception Trace', trace, relation='ERROR',
levelno=logging.DEBUG, format='pre', _logger=self.logger)
return delegate(test, err)
def __call__(self, *args, **kwargs):
"""Wraps the base class fixture heuristics that run an individual test."""
# It appears this is not standard.
# The default test passes args[0]
# but py.test passes kwargs 'result'
if args:
result = args[0]
else:
result = kwargs.get('result')
if result is None:
result = self.defaultTestResult()
do_skip = result.addSkip
do_success = result.addSuccess
do_error = result.addError
do_failure = result.addFailure
result.addSkip = lambda test, why: self.__trap_skip(do_skip, test, why)
result.addSuccess = lambda test: self.__trap_success(do_success, test)
result.addError = lambda test, err: self.__trap_error(do_error, test, err)
result.addFailure = (
lambda test, err: self.__trap_failure(do_failure, test, err))
method_name = self.__method_name
self.__in_step = _TestProcessingStep.SETUP
try:
doc = {'_doc': self.__method.__doc__} if self.__method.__doc__ else {}
JournalLogger.begin_context('Test "{0}"'.format(method_name),
**doc)
self.__begin_step_context()
super(BaseTestCase, self).__call__(result)
finally:
self.__end_step_context(relation=self.__final_outcome_relation)
self.__in_step = None
JournalLogger.end_context(relation=self.__final_outcome_relation)
def log_start_test(self, name=''):
"""Mark the beginning of a test in the log."""
if not name:
# The name of the function calling us.
name = str(inspect.stack()[1][3])
self.logger.debug('START %s', name,
extra={'citest_journal': {'nojournal': True}})
def log_end_test(self, name):
"""Mark the end of a test in the log."""
if not name:
# The name of the function calling us.
name = str(inspect.stack()[1][3])
underline = '-=' * 39 # separator between tests
self.logger.debug('END %s\n%s\n', name, underline,
extra={'citest_journal': {'nojournal': True}})
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Adds arguments introduced by the BaseTestCase module.
Args:
parser: [argparse.ArgumentParser] instance to add to.
defaults: [dict] dictionary overriding default values.
"""
# pylint: disable=invalid-name
pass
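# Editor's sketch of a minimal subclass (hypothetical test name); the setUp
# and tearDown journal contexts are handled by BaseTestCase itself:
#
#   class MyIntegrationTest(BaseTestCase):
#     def test_something(self):
#       self.log_start_test()
#       self.assertTrue(True)
#       self.log_end_test('test_something')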
|
louyihua/edx-platform | lms/djangoapps/instructor_task/tests/test_models.py | Python | agpl-3.0 | 4,163 | 0.001922 | """
Tests for instructor_task/models.py.
"""
import copy
from cStringIO import StringIO
import time
import boto
from django.conf import settings
from django.test import SimpleTestCase, override_settings
from mock import patch
from common.test.utils import MockS3Mixin
from instructor_task.models import ReportStore
from instructor_task.tests.test_base import TestReportMixin
from opaque_keys.edx.locator import CourseLocator
class ReportStoreTestMixin(object):
"""
Mixin for report store tests.
"""
def setUp(self):
super(ReportStoreTestMixin, self).setUp()
self.course_id = CourseLocator(org="testx", course="coursex", run="runx")
def create_report_store(self):
"""
Subclasses should override this and return their report store.
"""
pass
def test_links_for_order(self):
"""
Test that ReportStore.links_for() returns file download links
in reverse chronological order.
"""
report_store = self.create_report_store()
self.assertEqual(report_store.links_for(self.course_id), [])
report_store.store(self.course_id, 'old_file', StringIO())
time.sleep(1) # Ensure we have a unique timestamp.
report_store.store(self.course_id, 'middle_file', StringIO())
time.sleep(1) # Ensure we have a unique timestamp.
report_store.store(self.course_id, 'new_file', StringIO())
self.assertEqual(
[link[0] for link in report_store.links_for(self.course_id)],
['new_file', 'middle_file', 'old_file']
)
class LocalFSReportStoreTestCase(ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
"""
Test the old LocalFSReportStore configuration.
"""
def create_report_store(self):
"""
Create and return a DjangoStorageReportStore using the old
LocalFSReportStore configuration.
"""
return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
@patch.dict(settings.GRADES_DOWNLOAD, {'STORAGE_TYPE': 's3'})
class S3ReportStoreTestCase(MockS3Mixin, ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
"""
Test the old S3ReportStore configuration.
"""
def create_report_store(self):
"""
Create and return a DjangoStorageReportStore using the old
S3ReportStore configuration.
"""
connection = boto.connect_s3()
connection.create_bucket(settings.GRADES_DOWNLOAD['BUCKET'])
return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
class DjangoStorageReportStoreLocalTestCase(ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
"""
Test the DjangoStorageReportStore implementation using the local
filesystem.
"""
def create_report_store(self):
"""
Create and return a DjangoStorageReportStore configured to use the
local filesystem for storage.
"""
test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
test_settings['STORAGE_KWARGS'] = {'location': settings.GRADES_DOWNLOAD['ROOT_PATH']}
with override_settings(GRADES_DOWNLOAD=test_settings):
return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
class DjangoStorageReportStoreS3TestCase(MockS3Mixin, ReportStoreTestMixin, TestReportMixin, SimpleTestCase):
"""
Test the DjangoStorageReportStore implementation using S3 stubs.
"""
def create_report_store(self):
"""
Create and return a DjangoStorageReportStore configured to use S3 for
storage.
"""
test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
test_settings['STORAGE_CLASS'] = 'storages.backends.s3boto.S3BotoStorage'
test_settings['STORAGE_KWARGS'] = {
'bucket': settings.GRADES_DOWNLOAD['BUCKET'],
'location': settings.GRADES_DOWNLOAD['ROOT_PATH'],
}
with override_settings(GRADES_DOWNLOAD=test_settings):
connection = boto.connect_s3()
connection.create_bucket(settings.GRADES_DOWNLOAD['STORAGE_KWARGS']['bucket'])
return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
|
PhilHarnish/forge | src/data/graph/multi/multi_walk.py | Python | mit | 1,190 | 0.008403 | import collections
from typing import Dict, Iterable, List, Union
from data import types
from data.graph import bloom_node, walk as walk_internal
Expression = bloom_node.BloomNode
Expressions = Union[List[Expression], Dict[str, Expression]]
WeightedWords = Union[List[types.WeightedWord], Dict[str, types.WeightedWord]]
_EXHAUSTED = {}
class ResultSet(dict):
def __init__(self, values: WeightedWords) -> None:
if isinstance(values, list):
kwargs = enumerate(values)
else:
kwargs = values
super(ResultSet, self).__init__(kwargs)
class Results(object):
def __init__(self, expressions: Expressions) -> None:
if isinstance(expressions, list):
expressions = collections.OrderedDict(enumerate(expressions))
self._expressions = expressions
  def __iter__(self) -> Iterable[ResultSet]:
if not self._expressions:
return
sources = [(k, walk_internal.walk(v)) for k, v in self._expressions.items()]
values = [(k, next(v, _EXHAUSTED)) for k, v in sources]
if not any(v is _EXHAUSTED for _, v in values):
yield ResultSet(dict(values))
def walk(expressions: Expressions) -> Iterable[ResultSet]:
return Results(expressi | ons)
|
carlosb/scicomp | scicomp/rootfind/rootfindpack.py | Python | gpl-3.0 | 11,013 | 0 | """
Root finding methods
====================
Routines in this module:
bisection(f, a, b, eps=1e-5)
newton1(f, df, eps=1e-5)
newtonn(f, J, x0, eps=1e-5)
secant(f, x0, x1, eps=1e-5)
inv_cuadratic_interp(f, a, b, c, eps=1e-5)
lin_fracc_interp(f, a, b, c, eps=1e-5)
broyden(f, x0, B0, eps=1e-5)
"""
import numpy as np
'''
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Copyright (C) 4/24/17 Carlos Brito
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
'''
__all__ = ['bisection', 'newton1', 'secant', 'newtonn',
'inv_cuadratic_interp', 'lin_fracc_interp',
'brent']
def bisection(f, a, b, eps=1e-5, display=False):
"""
Find root of f.
This function computes a root of the function f using the bisection method.
Parameters
----------
f : function
Function we want to find the root of.
a : float
Lower bound.
b : float
High bound.
eps : float
Tolerance.
Returns
-------
m : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
if a > b:
a, b = b, a
while((b - a) > eps):
m = a + np.float32(b - a) / 2.
if (np.sign(f(a)) == np.sign(f(m))):
a = m
else:
b = m
if display:
print 'iteration ', iterations
print 'm: ', m
iterations += 1
return m, iterations
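# Editor's example: the positive root of x**2 - 2 on [0, 2] is sqrt(2).
#
#     root, its = bisection(lambda x: x**2 - 2, 0., 2., eps=1e-6)
#     # abs(root - 2**0.5) < 1e-6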
def newton1(f, df, x0, eps=1e-5, display=False):
"""
Find root of f.
This method computes the root of f using Newton's method.
Parameters
----------
f : function
Function we want to find the root of.
df : function
Derivative of f.
x0 : float
This is the starting point for the method.
eps : float
Tolerance.
Returns
-------
root : float
        Root of f.
    iterations : int
        Number of iterations taken to find root.
    """
iterations = 0
x_old = np.float(x0)
x_new = x_old
while(True):
try:
x_old = x_new
x_new = x_old - f(x_old) / df(x_old)
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
            if(abs(x_old - x_new) <= eps):
break
except(ZeroDivisionError):
return np.nan
root = x_new
return root, iterations
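# Editor's example: the same root via Newton's method, which also needs the
# derivative 2*x.
#
#     root, its = newton1(lambda x: x**2 - 2, lambda x: 2*x, 1., eps=1e-10)
#     # root converges to 1.41421356... in a handful of iterations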
def secant(f, x0, x1, eps=1e-5, display=False):
"""
Parameters
----------
    f : function
Function we want to find the root of.
x0 : float
First initial value "close" to the root of f.
x1: float
Second initial value "close" to the root of f.
eps : float
Tolerance.
Returns
-------
root : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
x_old_0 = x0
x_old_1 = x1
x_new = x0 - f(x0) * (x1 - x0) / (f(x1) - f(x0))
while True:
x_old_0 = x_old_1
x_old_1 = x_new
x_new = x_old_1 - f(x_old_1) * \
((x_old_1 - x_old_0) / (f(x_old_1) - f(x_old_0)))
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
if(abs(x_old_1 - x_new) < eps):
break
root = x_new
return root, iterations
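# Editor's example: the derivative-free counterpart of the Newton example.
#
#     root, its = secant(lambda x: x**2 - 2, 1., 2., eps=1e-10)
#     # abs(root - 2**0.5) is below the tolerance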
def inv_cuadratic_interp(f, a, b, c, eps=1e-5, display=False):
"""
Find root of f.
    This method finds the root of f using the inverse quadratic
interpolation method.
Parameters
----------
f : function
Function we want to find the root of.
a : float
First initial value.
b : float
Second initial value.
c : float
Third initial value.
Returns
-------
root : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
while True:
u = f(b) / f(c)
v = f(b) / f(a)
w = f(a) / f(c)
p = v * (w * (u - w) * (c - b) - (1 - u) * (b - a))
q = (w - 1) * (u - 1) * (v - 1)
x_new = b + p / q
a = b
b = c
c = x_new
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
if(abs(f(x_new)) < eps):
break
root = x_new
return root, iterations
def lin_fracc_interp(f, a, b, c, eps=1e-5, display=False):
"""
Find root of f.
This method finds the root of f using the linear fractional
interpolation method.
Parameters
----------
f : function
Function we want to find the root of.
a : float
First initial value.
b : float
Second initial value.
c : float
Third initial value.
Returns
-------
root : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
while True:
numerator = (a - c) * (b - c) * (f(a) - f(b)) * f(c)
denominator = (a - c) * (f(c) - f(b)) * f(a) - \
(b - c) * (f(c) - f(a)) * f(b)
h = numerator / denominator
x_new = c + h
a = b
b = c
c = x_new
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
if(abs(f(x_new)) < eps):
break
root = x_new
return root, iterations
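# Editor's example covering both interpolation methods, with three starting
# points that bracket the root of x**2 - 2:
#
#     r1, _ = inv_cuadratic_interp(lambda x: x**2 - 2, 0., 1., 2., eps=1e-10)
#     r2, _ = lin_fracc_interp(lambda x: x**2 - 2, 0., 1., 2., eps=1e-10)
#     # both r1 and r2 approximate 2**0.5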
def broyden(f, x0, B0, eps=1e-5, display=False):
"""
Finds roots for functions of k-variables.
This function utilizes Broyden's method to find roots in a
k-dimensional function f utilizing the initial Jacobian B0
at x0.
Parameters
----------
f : function which takes an array_like matrix and
returns an array_like matrix
Function we want to find the root of.
x0 : array_like
Initial point.
B0 : array_like
Jacobian of function at x0.
eps : float
Error tolerance.
Returns
-------
root : array_like
Root of function.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
x_new = x0
B_new = B0
while True:
x_old = x_new
B_old = B_new
        s = np.linalg.solve(B_old, -f(x_old))  # solve B_old @ s = -f(x_old) for the step s
x_new = x_old + s
y = f(x_new) - f(x_old)
        # rank-1 Broyden update; np.outer keeps it a matrix for 1-D arrays
        B_new = B_old + np.outer(y - np.dot(B_old, s), s) / np.dot(s, s)
if display:
print 'iteration ', iterations
print 'x:', x_new
print 'B', B_new
iterations += 1
# convergence check
if(np.all(np.abs(x_old - x_new) <= eps)):
break
root = x_new
return root, iterations
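# Editor's example (1-D numpy arrays; B0 is the exact Jacobian at x0, a
# common way to seed Broyden's method):
#
#     f = lambda x: np.array([x[0]**2 + x[1]**2 - 2, x[0] - x[1]])
#     x0 = np.array([2., 0.5])
#     B0 = np.array([[2*x0[0], 2*x0[1]], [1., -1.]])
#     root, its = broyden(f, x0, B0, eps=1e-8)
#     # root should approximate [1., 1.]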
def newtonn(f, J, x0, eps=1e-5, display=False):
"""
Finds roots for functions of k-variables.
This function utilizes Newton's method for root finding
to find roots in a k-dimensional function. To do this,
it takes the Jacobian of the function and an initial
point.
Parameters
----------
f : function which takes an array_like matrix and
returns an array_like matrix
J : function returning an array_like matrix
Jacobian of function.
x0 : array_like
Initial point.
eps : float
Error tolerance.
Returns
-------
root : array_like
Root of function.
iterations : int
        Number of iterations taken to find root.
artyomboyko/log-analysis | log_reader.py | Python | mit | 2,330 | 0.002575 | import time
import asyncio
from aiokafka import AIOKafkaProducer
from settings import KAFKA_SERVERS, SAVEPOINT, LOG_FILE, KAFKA_TOPIC
class LogStreamer:
def __init__(self,
KAFKA_SERVERS,
KAFKA_TOPIC,
loop,
savepoint_file,
log_file):
self.KAFKA_TOPIC = KAFKA_TOPIC
self.loop = loop
        self.producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=KAFKA_SERVERS)
self.savepoint_file = savepoint_file
self.log_file = log_file
async def produce(self, finite=False):
        last = self.savepoint_file.read()
        last = int(last) if last else 0  # savepoint holds a byte offset as text
        if last:
            self.log_file.seek(last)
skip_first_empty = True
while True:
line = self.log_file.readline()
line = line.strip(' \t\n\r')
if not line:
if finite and not skip_first_empty:
return
skip_first_empty = False
                await asyncio.sleep(0.1)  # don't block the event loop
current_position = self.log_file.tell()
                if last != current_position:
                    self.savepoint_file.seek(0)
                    self.savepoint_file.write(str(current_position))
                    last = current_position
continue
'''
            Here we could convert our data to JSON. But because JSON performance is not
            great with standard libraries, and because we use an asynchronous
            non-blocking model here, it's best to just pass the data as-is. I want to
            create as little overhead as possible here. We want to stream the data as
            fast as possible.
'''
await self.producer.send_and_wait(self.KAFKA_TOPIC, line.encode())
def start(self):
self.loop.run_until_complete(self.producer.start())
self.loop.run_until_complete(self.produce())
self.loop.run_until_complete(self.producer.stop())
self.loop.close()
if __name__ == '__main__':
with open(SAVEPOINT, 'r+') as savepoint_file, open(LOG_FILE, 'r') as log_file:
streamer = LogStreamer(KAFKA_SERVERS,
KAFKA_TOPIC,
asyncio.get_event_loop(),
savepoint_file,
log_file)
streamer.start()
|
JoaquimPatriarca/senpy-for-gis | gasp/frompsql.py | Python | gpl-3.0 | 3,280 | 0.014024 | """
PostgreSQL Database data to Python Object/Array
"""
from gasp.pgsql import connection
def sql_query(conParam, query, encoding=None):
"""
Retrive data from a SQL query
"""
conn = connection(conParam)
if encoding:
conn.set_client_encoding(encoding)
cursor = conn.cursor()
cursor.execute(query)
table = cursor.fetchall()
cursor.close()
conn.close()
return table
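# Editor's sketch (hypothetical connection parameters -- use whatever dict
# format gasp.pgsql.connection expects; requires a reachable PostgreSQL):
#
#     conParam = {'HOST': 'localhost', 'PORT': '5432', 'DATABASE': 'mydb',
#                 'USER': 'me', 'PASSWORD': 'secret'}
#     rows = sql_query(conParam, "SELECT id, name FROM mytable LIMIT 10")
#     # rows is a list of tuples, one per record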
def pgtable_to_dict(pgTable, pgCon, sanitizeColsName=True, cols=None):
"""
PG TABLE DATA to Python dict
"""
    from gasp.pgsql.fields import get_columns_name
from gasp import goToList
cols = get_columns_name(pgCon, pgTable) if not cols else \
goToList(cols)
data = sql_query(
pgCon,
'SELECT {cols_} FROM {table}'.format(
cols_=', '.join(cols),
table=pgTable
)
)
    if not sanitizeColsName:
from gasp.pgsql import pgsql_special_words
for i in range(len(cols)):
if cols[i][1:-1] in pgsql_special_words():
cols[i] = cols[i][1:-1]
return [
{cols[i] : row[i] for i in range(len(cols))} for row in data
]
def sql_query_with_innerjoin(
dic4con, main_table_lst, relation_table_lst,
fld_of_interest, obj_of_interest, fields_to_select):
"""
Select data based on it relation with something
Applies the SQL INNERJOIN method
* Table lists objects:
[0] is the table name
[1] is the foreign key column name
"""
main_table, main_foreign = main_table_lst
relate_table, relate_foreign = relation_table_lst
con = connection(dic4con)
obj_o_int = '\'{}\''.format(str(obj_of_interest)) if type(obj_of_interest) == str \
or type(obj_of_interest) == unicode else str(obj_of_interest)
table = sql_query(
dic4con,
("SELECT {cols} FROM {m} INNER JOIN {r} ON {m}.{fld1_join} "
"= {r}.{fld2_join} WHERE {fld}={c};").format(
cols = ','.join(fields_to_select),
m = main_table, fld1_join = main_foreign,
r=relate_table, fld2_join = relate_foreign,
fld = fld_of_interest, c = obj_o_int
)
)
if len(fields_to_select) == 1:
from itertools import chain
l = list(chain.from_iterable(table))
elif len(fields_to_select) == 2:
l = dict(table)
elif len(fields_to_select) > 2:
l = dict([[k[0], k[1:]] for k in table])
return l
def sql_to_df(conParam, query):
"""
Query database and convert data to Pandas Dataframe
"""
import pandas
from gasp.sqalchemy import get_psql_engine
pgengine = get_psql_engine(conParam)
df = pandas.read_sql(query, pgengine, columns=None)
return df
def psql_to_geodf(conParam, query, geomCol='geom',
epsg=None):
"""
Query database and convert data to Pandas GeoDataframe
"""
from geopandas import GeoDataFrame
from gasp.pgsql import connection
con = connection(conParam)
df = GeoDataFrame.from_postgis(
query, con, geom_col=geomCol,
crs="epsg:{}".format(str(epsg)) if epsg else None
)
return df
|
SedFoam/sedfoam | tutorials/Py/plot_tuto1DBedLoadTurb.py | Python | gpl-2.0 | 3,062 | 0.010124 | import subprocess
import sys
import numpy as np
import fluidfoam
import matplotlib.pyplot as plt
plt.ion()
############### Plot properties #####################
import matplotlib.ticker as mticker
from matplotlib.ticker import StrMethodFormatter, NullFormatter
from matplotlib import rc
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
label_size = 20
legend_size = 12
fontsize=25
linewidth=2
plt.rcParams['xtick.labelsize'] = label_size
plt.rcParams['ytick.labelsize'] = label_size
plt.rcParams['legend.fontsize'] = legend_size
plt.rcParams['lines.linewidth'] = linewidth
plt.rcParams['axes.labelsize'] = fontsize
####################################################
######################
# Load DEM data
######################
zDEM, phiDEM, vxPDEM, vxFDEM, TDEM = np.loadtxt('DATA/BedloadTurbDEM.txt', unpack=True)
######################
#Read SedFoam results
######################
sol = '../1DBedLoadTurb/'
try:
proc = subprocess.Popen(
["foamListTimes", "-latestTime", "-case", sol],
stdout=subprocess.PIPE,)
except:
print("foamListTimes : command not found")
print("Do you have load OpenFoam environement?")
sys.exit(0)
output = proc.stdout.read()  # to obtain the output of foamListTimes from the subprocess
timeStep = output.decode().rstrip().split('\n')[0] #Some management on the output to obtain a number
#Read the data
X, Y, Z = fluidfoam.readmesh(sol)
z = Y
phi = fluidfoam.readscalar(sol, timeStep, 'alpha_a')
vxPart = fluidfoam.readvector(sol, timeStep, 'Ua')[0]
vxFluid = fluidfoam.readvector(sol, timeStep, 'Ub')[0]
T = fluidfoam.readscalar(sol, timeStep, 'Theta')
######################
#Plot results
######################
d = 0.006 #6mm diameter particles
plt.figure(figsize=[10,5])
plt.subplot(141)
plt.plot(phiDEM, zDEM/d, 'k--', label=r'DEM')
plt.plot(phi, z/d, label=r'SedFoam')
plt.xlabel(r'$\phi$', fontsize=25)
plt.ylabel(r'$\frac{z}{d}$', fontsize=30, rotation=True, horizontalalignment='right')
plt.grid()
plt.ylim([-1.525, 32.025])
plt.legend()
plt.subplot(142)
I = np.where(phiDEM>0.001)[0]
plt.plot(vxPDEM[I], zDEM[I]/d, 'r--')
I = np.where(phi>0.001)[0]
plt.plot(vxPart[I], z[I]/d, 'r', label=r'$v_x^p$')
plt.plot(vxFDEM, zDEM/d, 'b--')
plt.plot(vxFluid, z/d, 'b', label=r'$u_x^f$')
plt.xlabel(r'$v_x^p$, $u_x^f$', fontsize=25)
plt.ylim([-1.525, 32.025])
plt.grid()
plt.legend()
ax = plt.gca()
ax.set_yticklabels([])
plt.legend()
plt.subplot(143)
plt.plot(phiDEM*vxPDEM, zDEM/d, 'k--', label=r'DEM')
plt.plot(phi*vxPart, z/d, label=r'SedFoam')
plt.xlabel(r'$q = \phi v_x^p$', fontsize=25)
plt.grid()
plt.ylim([-1.525, 32.025])
ax = plt.gca()
ax.set_yticklabels([])
plt.subplot(144)
I = np.where(phiDEM>0.001)[0]
plt.plot(TDEM[I], zDEM[I]/d, 'k--', label=r'DEM')
I = np.where(phi>0.001)[0]
plt.plot(T[I], z[I]/d, label=r'SedFoam')
plt.xlabel(r'$T$', fontsize=25)
plt.grid()
plt.ylim([-1.525, 32.025])
ax = plt.gca()
ax.set_yticklabels([])
plt.savefig('Figures/res_TutoBedloadTurb.png', bbox_inches='tight')
plt.show(block=True)
|
adammaikai/OmicsPipe2.0 | omics_pipe/modules/RNAseq_QC.py | Python | mit | 1,409 | 0.036196 | #!/usr/bin/env python
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def RNAseq_QC(sample, RNAseq_QC_flag):
'''Runs picard rnaseqmetrics and insertsize estimation
input:
.bam
output:
pdf plot
link:
parameters from parameters file:
STAR_RESULTS:
QC_PATH:
BAM_FILE_NAME:
RSEQC_REF:
TEMP_DIR:
PICARD_VERSION:
        R_VERSION:
'''
    spawn_job(jobname = 'RNAseq_QC', SAMPLE = sample, LOG_PATH = p.OMICSPIPE["LOG_PATH"], RESULTS_EMAIL = p.OMICSPIPE["EMAIL"], SCHEDULER = p.OMICSPIPE["SCHEDULER"], walltime = p.RNASEQQC["WALLTIME"], queue = p.OMICSPIPE["QUEUE"], nodes = p.RNASEQQC["NODES"], ppn = p.RNASEQQC["CPU"], memory = p.RNASEQQC["MEMORY"], script = "/RNAseq_QC.sh", args_list = [p.RNASEQQC["ALIGNMENT_DIR"], p.RNASEQQC["RESULTS"], p.RNASEQQC["BAM_FILE_NAME"], p.RNASEQQC["REFFLAT"], p.RNASEQQC["TEMP_DIR"], sample, p.RNASEQQC["PICARD_VERSION"], p.RNASEQQC["R_VERSION"]])
job_status(jobname = 'RNAseq_QC', resultspath = p.RNASEQQC["RESULTS"], SAMPLE = sample, outputfilename = sample + "/insertSizeHist.pdf", FLAG_PATH = p.OMICSPIPE["FLAG_PATH"])
return
if __name__ == '__main__':
RNAseq_QC(sample, RNAseq_QC_flag)
sys.exit(0)
|
claudep/pootle | pootle/apps/pootle_app/migrations/0015_add_tp_path_idx.py | Python | gpl-3.0 | 470 | 0.002128 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-04 16:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pootle_app', '0014_set_directory_tp_path'),
]
operations = [
migrations.AlterIndexTogether(
name='directory',
            index_together=set([('obsolete', 'tp', 'tp_path'), ('obsolete', 'pootle_path')]),
),
]
|
janezhango/BigDataMachineLearning | py/testdir_single_jvm/test_exec2_enums_rand_cut.py | Python | apache-2.0 | 11,650 | 0.006867 | import unittest, random, sys, time, re, getpass
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util
import h2o_print as h2p, h2o_gbm
# details:
# we want to seed a random dictionary for our enums
# string.ascii_uppercase string.printable string.letters string.digits string.punctuation string.whitespace
# restricting the choices makes it easier to find the bad cases
randChars = "abeE01" + "$%+-.;|\t "
randChars = "abeE01" # bad..causes NAification. probably 1E0e is causing a problem
# randChars = "abfF01" # try this.. fails
# randChars = "abcdef" #
quoteChars = "\'\""
# don't use any quote characters. We'd have to protect combinations
quoteChars = ""
MIN_ENUM_WIDTH = 2
MAX_ENUM_WIDTH = 8
RAND_ENUM_LENGTH = True
CUT_EXPR_CNT = 200
ROWS=1000000
DO_PLOT = getpass.getuser()=='kevin'
DO_MEDIAN = True
MAX_QBINS = 1000
MULTI_PASS = 1
def random_enum(n, randChars=randChars, quoteChars=quoteChars):
# randomly return None 10% of the time
# if random.randint(0,9)==0:
# return 'huh' # empty string doesn't work for exec compare?
choiceStr = randChars + quoteChars
mightBeNumberOrWhite = True
while mightBeNumberOrWhite:
# H2O doesn't seem to tolerate random single or double quote in the first two rows.
# disallow that by not passing quoteChars for the first two rows (in call to here)
r = ''.join(random.choice(choiceStr) for x in range(n))
mightBeNumberOrWhite = h2o_util.might_h2o_think_number_or_whitespace(r)
return r
def create_enum_list(n=4, **kwargs):
# Allowing length one, we sometimes form single digit numbers that cause the whole column to NA
# see DparseTask.java for this effect
# FIX! if we allow 0, then we allow NA?. I guess we check for no missing, so can't allow NA
# too many retries allowing 1. try 2 min.
if RAND_ENUM_LENGTH:
enumList = [random_enum(n=random.randint(MIN_ENUM_WIDTH, MAX_ENUM_WIDTH), **kwargs) for i in range(n)]
else:
# a fixed width is sometimes good for finding badness
enumList = [random_enum(n=MAX_ENUM_WIDTH, **kwargs) for i in range(n)]
return enumList
def create_col_enum_list(inCount):
# create the per-column choice lists
colEnumList = []
for col in range(inCount):
enumList = create_enum_list(n=random.randint(1,4), quoteChars=quoteChars)
colEnumList.append(enumList)
return colEnumList
def write_syn_dataset(csvPathname, rowCount, inCount=1, outCount=1, SEED='12345678',
colSepChar=",", rowSepChar="\n", quoteChars="", colEnumList=None):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for row in range(rowCount):
        # doesn't guarantee that 10000 rows have 10000 unique enums in a column
# essentially sampling with replacement
rowData = []
for iCol in range(inCount):
            # FIX! we should add some random NA?
ri = random.choice(colEnumList[iCol])
rowData.append(ri)
# output columns. always 0-10e6 with 2 digits of fp precision
for oCol in range(outCount):
ri = "%.2f" % random.uniform(0, 10e6)
rowData.append(ri)
# use the new Hive separator
rowDataCsv = colSepChar.join(map(str,rowData)) + rowSepChar
### sys.stdout.write(rowDataCsv)
dsf.write(rowDataCsv)
dsf.close()
return colEnumList
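# Minimal usage sketch (the path is an assumption):
#
#     colEnumList = create_col_enum_list(10)
#     write_syn_dataset('/tmp/syn.csv', rowCount=1000, inCount=10, outCount=9,
#                       SEED='12345678', colEnumList=colEnumList)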
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=14)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
# h2o.sleep(3600)
h2o.tear_down_cloud()
def test_exec_enums_rand_cut(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
n = ROWS
tryList = [
(n, 10, 9, 'cE', 300),
]
# create key names to use for exec
eKeys = ['e%s' % i for i in range(10)]
# h2b.browseTheCloud()
trial = 0
for (rowCount, iColCount, oColCount, hex_key, timeoutSecs) in tryList:
colCount = iColCount + oColCount
hex_key = 'p'
colEnumList = create_col_enum_list(iColCount)
# create 100 possible cut expressions here, so we don't waste time below
            rowExprList = []
            print "Creating", CUT_EXPR_CNT, 'cut expressions'
            for i in range(CUT_EXPR_CNT):
# init cutValue. None means no compare
cutValue = [None for i in range(iColCount)]
# build up a random cut expression
cols = random.sample(range(iColCount), random.randint(1,iColCount))
for c in cols:
# possible choices within the column
cel = colEnumList[c]
# for now the cutValues are numbers for the enum mappings
if 1==1:
# FIX! hack. don't use encoding 0, maps to NA here? h2o doesn't like
celChoice = str(random.choice(range(len(cel))))
else:
celChoice = random.choice(cel)
cutValue[c] = celChoice
cutExprList = []
for i,c in enumerate(cutValue):
if c is None:
continue
else:
# new ...ability to reference cols
# src[ src$age<17 && src$zip=95120 && ... , ]
cutExprList.append('p$C'+str(i+1)+'=='+c)
cutExpr = ' && '.join(cutExprList)
print "cutExpr:", cutExpr
# should be two different keys in the sample
e = random.sample(eKeys,2)
fKey = e[0]
eKey = e[1]
rowExpr = '%s[%s,];' % (hex_key, cutExpr)
print "rowExpr:", rowExpr
rowExprList.append(rowExpr)
# CREATE DATASET*******************************************
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_enums_' + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, iColCount, oColCount, SEEDPERFILE, colEnumList=colEnumList)
# PARSE*******************************************************
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(key=parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
# print h2o.dump_json(inspect)
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False)
# error if any col has constant values
if len(constantValuesDict) != 0:
raise Exception("Probably got a col NA'ed and constant values as a result %s" % constantValuesDict)
# INIT all possible key names used***************************
# remember. 1 indexing!
# is this needed?
if 1==1:
a = 'a=c(1,2,3);' + ';'.join(['a[,%s]=a[,%s-1]'% (i,i) for i in range(2,colCount)])
print a
for eKey in eKeys:
# build up the columns
e = h2o.nodes[0].exec_query(str='%s;%s=a' % (a, eKey), print_params=False)
## print h2o.dump_json(e)
xList = []
eList = []
fList = []
for rep |
aequitas/home-assistant | homeassistant/components/sensibo/climate.py | Python | apache-2.0 | 12,247 | 0 | """Support for Sensibo wifi-enabled home thermostats."""
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.components.climate.const import (
DOMAIN, SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE,
SUPPORT_FAN_MODE, SUPPORT_SWING_MODE,
SUPPORT_ON_OFF, STATE_HEAT, STATE_COOL, STATE_FAN_ONLY, STATE_DRY,
STATE_AUTO)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_STATE, ATTR_TEMPERATURE, CONF_API_KEY, CONF_ID,
STATE_ON, STATE_OFF, TEMP_CELSIUS, TEMP_FAHRENHEIT)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.temperature import convert as convert_temperature
_LOGGER = logging.getLogger(__name__)
ALL = ['all']
TIMEOUT = 10
SERVICE_ASSUME_STATE = 'sensibo_assume_state'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
})
ASSUME_STATE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_STATE): cv.string,
})
_FETCH_FIELDS = ','.join([
'room{name}', 'measurements', 'remoteCapabilities',
'acState', 'connectionStatus{isAlive}', 'temperatureUnit'])
_INITIAL_FETCH_FIELDS = 'id,' + _FETCH_FIELDS
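# Rough shape of one device record implied by the fields above (an
# illustrative sketch; the values are assumptions, not real API output):
#
#     {'id': 'abc123', 'room': {'name': 'Living room'},
#      'measurements': {'temperature': 21.0, 'humidity': 40},
#      'acState': {'on': True, 'mode': 'cool', 'targetTemperature': 21},
#      'remoteCapabilities': {'modes': {...}},
#      'connectionStatus': {'isAlive': True}, 'temperatureUnit': 'C'}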
FIELD_TO_FLAG = {
'fanLevel': SUPPORT_FAN_MODE,
'mode': SUPPORT_OPERATION_MODE,
'swing': SUPPORT_SWING_MODE,
'targetTemperature': SUPPORT_TARGET_TEMPERATURE,
'on': SUPPORT_ON_OFF,
}
SENSIBO_TO_HA = {
"cool": STATE_COOL,
"heat": STATE_HEAT,
"fan": STATE_FAN_ONLY,
"auto": STATE_AUTO,
"dry": STATE_DRY
}
HA_TO_SENSIBO = {value: key for key, value in SENSIBO_TO_HA.items()}
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up Sensibo devices."""
import pysensibo
client = pysensibo.SensiboClient(
config[CONF_API_KEY], session=async_get_clientsession(hass),
timeout=TIMEOUT)
devices = []
try:
for dev in (
await client.async_get_devices(_INITIAL_FETCH_FIELDS)):
if config[CONF_ID] == ALL or dev['id'] in config[CONF_ID]:
devices.append(SensiboClimate(
client, dev, hass.config.units.temperature_unit))
except (aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError, pysensibo.SensiboError):
_LOGGER.exception('Failed to connect to Sensibo servers.')
raise PlatformNotReady
if devices:
async_add_entities(devices)
async def async_assume_state(service):
"""Set state according to external service call.."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_climate = [device for device in devices
if device.entity_id in entity_ids]
else:
target_climate = devices
update_tasks = []
for climate in target_climate:
await climate.async_assume_state(
service.data.get(ATTR_STATE))
update_tasks.append(climate.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
DOMAIN, SERVICE_ASSUME_STATE, async_assume_state,
schema=ASSUME_STATE_SCHEMA)
class SensiboClimate(ClimateDevice):
"""Representation of a Sensibo device."""
def __init__(self, client, data, units):
"""Build SensiboClimate.
client: aiohttp session.
data: initially-fetched data.
"""
self._client = client
self._id = data['id']
self._external_state = None
self._units = units
self._available = False
self._do_update(data)
@property
def supported_features(self):
| """Return the list of supported features."""
return self._supported_features
def _do_update(self, data):
self._name = data['room']['name']
self._measurements = data['measurements']
self._ac_states = data['acState']
self._available = data['connectionStatus']['isAlive']
capabilities = data['remoteCapabilities']
self._operations = [SENSIBO_TO_HA[mode] for mode
in capabilities['modes']]
self._current_c | apabilities = \
capabilities['modes'][self._ac_states['mode']]
temperature_unit_key = data.get('temperatureUnit') or \
self._ac_states.get('temperatureUnit')
if temperature_unit_key:
self._temperature_unit = TEMP_CELSIUS if \
temperature_unit_key == 'C' else TEMP_FAHRENHEIT
self._temperatures_list = self._current_capabilities[
'temperatures'].get(temperature_unit_key, {}).get('values', [])
else:
self._temperature_unit = self._units
self._temperatures_list = []
self._supported_features = 0
for key in self._ac_states:
if key in FIELD_TO_FLAG:
self._supported_features |= FIELD_TO_FLAG[key]
@property
def state(self):
"""Return the current state."""
return self._external_state or super().state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {'battery': self.current_battery}
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return self._temperature_unit
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._ac_states.get('targetTemperature')
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
if self.temperature_unit == self.hass.config.units.temperature_unit:
# We are working in same units as the a/c unit. Use whole degrees
# like the API supports.
return 1
# Unit conversion is going on. No point to stick to specific steps.
return None
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return SENSIBO_TO_HA.get(self._ac_states['mode'])
@property
def current_humidity(self):
"""Return the current humidity."""
return self._measurements['humidity']
@property
def current_battery(self):
"""Return the current battery voltage."""
return self._measurements.get('batteryVoltage')
@property
def current_temperature(self):
"""Return the current temperature."""
# This field is not affected by temperatureUnit.
# It is always in C
return convert_temperature(
self._measurements['temperature'],
TEMP_CELSIUS,
self.temperature_unit)
@property
def operation_list(self):
"""List of available operation modes."""
return self._operations
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self._ac_states.get('fanLevel')
@property
def fan_list(self):
"""List of available fan modes."""
return self._current_capabilities.get('fanLevels')
@property
def current_swing_mode(self):
"""Return the fan setting."""
return self._ac_states.get('swing')
@property
def swing_list(self):
"""List of available swing modes."""
return self._current_capabilities.get('swing')
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def is_on(self):
"""Return true if AC is on.""" |
Sophist-UK/sophist-picard-plugins | sort_multivalue_tags.py | Python | gpl-2.0 | 1,718 | 0.00291 | # -*- coding: utf-8 -*-
# This is the Sort Multivalue Tags plugin for MusicBrainz Picard.
# Copyright (C) 2013 Sophist
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
PLUGIN_NAME = u"Sort Multi-Value Tags"
PLUGIN_AUTHOR = u"Sophist"
PLUGIN_DESCRIPTION = u'Sort Multi-Value Tags e.g. Performers alphabetically.'
PLUGIN_VERSION = "0.3"
PLUGI | N_API_VERSIONS = ["0.15.0", "0.15.1", "0.16.0", "1.0.0", "1.1.0", "1.2.0", "1.3.0"]
from picard.metadata import register_track_metadata_processor
# Exclude the following tags because the sort order is | related to other tags or has a meaning like primary artist
_sort_multivalue_tags_exclude = [
'artists', '~artists_sort', 'musicbrainz_artistid',
'albumartist', '~albumartists_sort', 'musicbrainz_albumartistid',
'work', 'musicbrainz_workid',
'label', 'catalognumber',
'country', 'date',
'releasetype',
]
def sort_multivalue_tags(album, metadata, *args):
for tag in metadata.keys():
if tag in _sort_multivalue_tags_exclude:
continue
data = dict.get(metadata, tag)
if len(data) > 1:
sorted_data = sorted(data)
if data != sorted_data:
metadata.set(tag, sorted_data)
register_track_metadata_processor(sort_multivalue_tags)
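# Example of the effect (illustrative): a track tagged with
# performer = ['Watts, Charlie', 'Jagger, Mick', 'Berry, Chuck'] is rewritten
# as ['Berry, Chuck', 'Jagger, Mick', 'Watts, Charlie'], while an excluded tag
# such as 'artists' keeps its original order.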
|
danche354/Sequence-Labeling | ner/evaluate-senna-hash-pos-chunk-128-64.py | Python | mit | 1,788 | 0.008949 | '''
evaluate result
'''
from keras.models import load_model
from keras.utils import np_utils
import numpy as np
import os
import sys
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
# input sentence dimensions
step_length = conf.ner_step_length
pos_length = conf | .ner_pos_length |
chunk_length = conf.ner_chunk_length
IOB = conf.ner_IOB_decode
test_data = load_data.load_ner(dataset='eng.testb')
best_epoch = sys.argv[1]
model_name = os.path.basename(__file__)[9:-3]
folder_path = './model/%s'%model_name
model_path = '%s/model_epoch_%s.h5'%(folder_path, best_epoch)
result = open('%s/predict.txt'%folder_path, 'w')
print('loading model...')
model = load_model(model_path)
print('loading model finished.')
for each in test_data:
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=[each])
pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
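    # pos and chunk end up as padded one-hot tensors of shape
    # (batch, step_length, pos_length) and (batch, step_length, chunk_length):
    # each tag index becomes a one-hot row, and rows beyond the true sentence
    # length are zero vectors.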
prob = model.predict_on_batch([embed_index, hash_index, pos, chunk])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
chunktags = [IOB[j] for j in predict_label][:l]
word_pos_chunk = list(zip(*each))
for ind, chunktag in enumerate(chunktags):
result.write(' '.join(word_pos_chunk[ind])+' '+chunktag+'\n')
result.write('\n')
result.close()
print('epoch %s predict over !'%best_epoch)
os.system('../tools/conlleval < %s/predict.txt'%folder_path)
|
JSBCCA/pythoncode | early_projects/practice_loop.py | Python | mit | 191 | 0 | def int_list():
howmany = int(input("How many inputs? "))
nums = []
f | or i in range(howmany):
x = int(input("Inp | ut: "))
nums.append(x)
print(nums)
int_list()
|
hassanabidpk/djangoproject.com | docs/tests.py | Python | bsd-3-clause | 2,750 | 0.004364 | import os
from pathlib import Path
from django.contrib.sites.models import Site
from django.core.urlresolvers import set_urlconf
from django.template import Context, Template
from django.test import TestCase
from .utils import get_doc_path
class SearchFormTestCase(TestCase):
fixtures = ['doc_test_fixtures']
def setUp(self):
# We need to create an extra Site because docs have SITE_ID=2
Site.objects.create(name='Django test', domain="example.com")
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super(SearchFormTestCase, cls).tearDownClass()
def test_empty_get(self):
response = self.client.get('/en/dev/search/',
HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 200)
class TemplateTagTests(TestCase):
def test_pygments_template_tag(self):
template = Template('''
{% load docs %}
{% pygment 'python' %}
def band_listing(request):
"""A view of all bands."""
bands = models.Band.objects.all()
return render(request, 'bands/band_listing.html', {'bands': bands})
{% endpygment %}
''')
        self.assertEqual(
            template.render(Context()),
            "\n\n<div class=\"highlight\"><pre><span class=\"k\">def</span> <span class= | \"nf\">"
            "band_listing</span><span class=\"p\">(</span><span class=\"n\">request</span><span "
            "class=\"p\">):</span>\n    <span class=\"sd\">&quot;&quot;&quot;A view of all bands"
            ".&quot;&quot;&quot;</span>\n    <span class=\"n\">bands</span> <span class=\"o\">="
            "</span> <span class=\"n\">models</span><span class=\"o\">.</span><span class=\"n\">"
            "Band</span><span class=\"o\">.</span><span class=\"n\">objects</span><span "
            "class=\"o\">.</span><span class=\"n\">all</span><span class=\"p\">()</span>\n    "
            "<span class=\"k\">return</span> <span class=\"n\">render</span><span class=\"p\">"
            "(</span><span class=\"n\">request</span><span class=\"p\">,</span> <span class=\"s\">"
            "&#39;bands/band_listing.html&#39;</span><span class=\"p\">,</span> <span class=\"p\">"
            "{</span><span class=\"s\">&#39;bands&#39;</span><span class=\"p\">:</span> "
            "<span class=\"n\">bands</span><span class=\"p\">})</span>\n</pre></div>\n\n"
        )
class TestUtils(TestCase):
def test_get_doc_path(self):
# non-existent file
self.assertEqual(get_doc_path(Path('root'), 'subpath.txt'), None)
# existing file
path, filename = __file__.rsplit(os.path.sep, 1)
self.assertEqual(get_doc_path(Path(path), filename), None)
|
stonebig/winpython | winpython/_vendor/qtpy/tests/test_qtxmlpatterns.py | Python | mit | 1,117 | 0.000895 | import pytest
from qtpy import PYSIDE2, PYSIDE6, PYQT6
@pytest.mark.skipif((PYSIDE6 or PYQT6), reason="not available with qt 6.0")
def test_qtxmlpatterns():
"""Test the qtpy.QtXmlPatterns namespace"""
from qtpy import QtXmlPatterns
assert QtXmlPatterns.QAbstractMessageHandler is not None
assert QtXmlPatterns.QAbstractUriResolver is not None
assert QtXmlPatterns.QAbstractXmlNodeModel is not None
assert QtXmlPatterns.QAbstractXmlReceiver is not None
if not PYSIDE2:
assert QtXmlPatterns.QSimpleXmlNodeModel is not None
assert QtXmlPatterns.QSourceLocation is not None
assert QtXmlPatterns.QXmlFormatter is not None
assert QtXmlPatterns.QXmlItem is not None
assert QtXmlPatterns.QXmlName is not None
assert QtXmlPatterns.QXmlNamePool is not None
assert QtXmlPatterns.QXmlNodeModelIndex is not None
assert QtXmlPatterns.QXmlQuery is not None
assert QtXmlPatterns.QXmlResultItems is not None
assert | QtXmlPatterns.QXmlSchema is not None
assert QtXmlPatterns.QXmlSchemaValidator is not None
assert QtXmlPatterns.QXmlSe | rializer is not None
|
xmaruto/mcord | xos/synchronizers/base/syncstep.py | Python | apache-2.0 | 10,897 | 0.012389 | import os
import base64
from datetime import datetime
from xos.config import Config
from xos.logger import Logger, logging
from synchronizers.base.steps import *
from django.db.models import F, Q
from core.models import *
from django.db import reset_queries
from synchronizers.base.ansible import *
from generate.dependency_walker import *
import json
import time
import pdb
logger = Logger(level=logging.INFO)
def f7(seq):
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x))]
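# f7 is an order-preserving de-duplicator, e.g. f7(['a', 'b', 'a']) == ['a', 'b'].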
def elim_dups(backend_str):
strs = backend_str.split(' // ')
strs2 = f7(strs)
return ' // '.join(strs2)
def deepgetattr(obj, attr):
return reduce(getattr, attr.split('.'), obj)
class InnocuousException(Exception):
pass
class DeferredException(Exception):
pass
class FailedDependency(Exception):
pass
class SyncStep(object):
""" An XOS Sync step.
Attributes:
psmodel Model name the step synchronizes
dependencies list of names of models that must be synchronized first if the current model depends on them
"""
# map_sync_outputs can return this value to cause a step to be marked
# successful without running ansible. Used for sync_network_controllers
# on nat networks.
SYNC_WITHOUT_RUNNING = "sync_without_running"
slow=False
def get_prop(self, prop):
try:
sync_config_dir = Config().sync_config_dir
except:
sync_config_dir = '/etc/xos/sync'
        prop_config_path = '/'.join([sync_config_dir, self.name, prop])
return open(prop_config_path).read().rstrip()
def __init__(self, **args):
"""Initialize a sync step
Keyword arguments:
name -- Name of the step
provides -- XOS models sync'd by this step
"""
dependencies = []
self.driver = args.get('driver')
self.e | rror_map = args.get('error_map')
try:
self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
except:
self.soft_deadline = 5 # 5 seconds
return
def fetch_pending(self, deletion=False):
# This is the most common implementation of fetch_pending
| # Steps should override it if they have their own logic
# for figuring out what objects are outstanding.
main_objs = self.observes
if (type(main_objs) is not list):
main_objs=[main_objs]
objs = []
for main_obj in main_objs:
if (not deletion):
lobjs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(no_sync=False))
else:
lobjs = main_obj.deleted_objects.all()
objs.extend(lobjs)
return objs
#return Instance.objects.filter(ip=None)
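        # In words: an object is pending when it has never been enacted or was
        # updated after its last enactment, and it is neither lazily blocked
        # nor marked no_sync.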
def check_dependencies(self, obj, failed):
for dep in self.dependencies:
peer_name = dep[0].lower() + dep[1:] # django names are camelCased with the first letter lower
peer_objects=[]
try:
peer_names = plural(peer_name)
peer_object_list=[]
try:
peer_object_list.append(deepgetattr(obj, peer_name))
except:
pass
try:
peer_object_list.append(deepgetattr(obj, peer_names))
except:
pass
for peer_object in peer_object_list:
try:
peer_objects.extend(peer_object.all())
except AttributeError:
peer_objects.append(peer_object)
except:
peer_objects = []
if (hasattr(obj,'controller')):
try:
peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects)
except AttributeError:
pass
if (failed in peer_objects):
if (obj.backend_status!=failed.backend_status):
obj.backend_status = failed.backend_status
obj.save(update_fields=['backend_status'])
raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
def sync_record(self, o):
try:
controller = o.get_controller()
controller_register = json.loads(controller.backend_register)
if (controller_register.get('disabled',False)):
raise InnocuousException('Controller %s is disabled'%controller.name)
except AttributeError:
pass
tenant_fields = self.map_sync_inputs(o)
if tenant_fields == SyncStep.SYNC_WITHOUT_RUNNING:
return
main_objs=self.observes
if (type(main_objs) is list):
main_objs=main_objs[0]
path = ''.join(main_objs.__name__).lower()
res = run_template(self.playbook,tenant_fields,path=path)
try:
self.map_sync_outputs(o,res)
except AttributeError:
pass
def delete_record(self, o):
try:
controller = o.get_controller()
controller_register = json.loads(o.node.site_deployment.controller.backend_register)
if (controller_register.get('disabled',False)):
raise InnocuousException('Controller %s is disabled'%sliver.node.site_deployment.controller.name)
except AttributeError:
pass
tenant_fields = self.map_delete_inputs(o)
main_objs=self.observes
if (type(main_objs) is list):
main_objs=main_objs[0]
path = ''.join(main_objs.__name__).lower()
tenant_fields['delete']=True
res = run_template(self.playbook,tenant_fields,path=path)
try:
self.map_delete_outputs(o,res)
except AttributeError:
pass
def call(self, failed=[], deletion=False):
#if ('Instance' in self.__class__.__name__):
# pdb.set_trace()
pending = self.fetch_pending(deletion)
for o in pending:
# another spot to clean up debug state
try:
reset_queries()
except:
# this shouldn't happen, but in case it does, catch it...
logger.log_exc("exception in reset_queries",extra=o.tologdict())
sync_failed = False
try:
backoff_disabled = Config().observer_backoff_disabled
except:
backoff_disabled = 0
try:
scratchpad = json.loads(o.backend_register)
if (scratchpad):
next_run = scratchpad['next_run']
if (not backoff_disabled and next_run>time.time()):
sync_failed = True
except:
logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
pass
if (not sync_failed):
try:
for f in failed:
self.check_dependencies(o,f) # Raises exception if failed
if (deletion):
self.delete_record(o)
o.delete(purge=True)
else:
new_enacted = datetime.now() # Is this the same timezone? XXX
self.sync_record(o)
o.enacted = new_enacted
scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time()}
o.backend_register = json.dumps(scratchpad)
o.backend_status = "1 - OK"
o.save(update_fields=['enacted','backend_status','backend_register'])
except (InnocuousException,Exception,DeferredException) as e:
logger.log_exc("sync step failed!",extra=o.tologdict())
|
jodaiber/semantic_compound_splitting | visualization_and_test/evaluate_candidates_indiv.py | Python | apache-2.0 | 4,883 | 0.006349 | __author__ = 'rwechsler'
import cPickle as pickle
import itertools
import random
from annoy import AnnoyIndex
import multiprocessing as mp
import sys
import argparse
import time
import datetime
import numpy as np
def timestamp():
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
def load_candidate_dump(file_name):
return pickle.load(open(file_name, "rb"))
def load_annoy_tree(model_file_name, vector_dims):
tree = AnnoyIndex(vector_dims)
tree.load(model_file_name)
return tree
def annoy_knn(annoy_tree, vector, true_index, k=100):
neighbours = annoy_tree.get_nns_by_vector(list(vector), k)
if true_index in neighbours:
return True
else:
return False
def test_pair(pair1, pair2, word2vec_model, k=100, show=30):
"""
Only used in interactive mode so far.
:param pair1:
:param pair2:
:param word2vec_model:
:param k:
:param show:
:return:
"""
prefix = pair1[0]
fl1 = pair1[1]
tail1 = pair1[2]
prefix2 = pair2[0]
fl2 = pair2[1]
tail2 = pair2[2]
assert prefix == prefix2
diff = word2vec_model[prefix + fl2 + tail2.lower()] - word2vec_model[tail2]
predicted = word2vec_model[tail1] + diff
true_word = prefix + fl1 + tail1.lower()
neighbours = word2vec_model.most_similar([predicted], topn=k)
print neighbours[:show]
neighbours, _ = zip(*neighbours)
print "Found: ", true_word in neighbours
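# The prediction above is the standard vector-offset trick: the
# compound-minus-tail offset observed for pair2 is added to pair1's tail, and
# the candidate counts as found if the true compound appears among the top-k
# nearest neighbours.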
def candidate_generator(candidates, rank_threshold, sample_size, annoy_tree_file, vector_dims, lock):
for prefix in candidates:
yield (prefix, candidates[prefix], annoy_tree_file, vector_dims, lock, rank_threshold, sample_size)
def mp_wrapper_evaluate_set(argument):
return evaluate_set(*argument)
if __name__ == "__main__":
#### Default Parameters-------------------------------------------####
rank_threshold = 100
sample_set_size = 500
n_processes = 2
####End-Parametes-------------------------------------------------####
parser = argparse.ArgumentParser(description='Evaluate candidates')
parser.add_argument('-d', action="store", dest="vector_dims", type=int, required=True)
parser.add_argument('-t', action="store", dest="annoy_tree_file", required=True)
parser.add_argument('-c', action="store", dest="candidates_index_file", required=True)
parser.add_argument('-o', action="store", dest="result_output_file", required=True)
parser.add_argument('-p', action="store", dest="n_processes", type=int, default=n_processes)
parser.add_argument('-s', action="store", dest="sample_set_size", type=int, default=sample_set_size)
parser.add_argument('-r', action="store", dest="rank_threshold", type=int, default=rank_threshold)
arguments = parser.parse_args(sys.argv[1:])
print timestamp(), "loading candidates"
candidates = load_candidate_dump(arguments.candidates_index_file)
print timestamp(), "load annoy tree"
# global annoy_tree
#annoy_tree = load_annoy_tree(arguments.annoy_tree_file, arguments.vector_dims)
annoy_tree_file = arguments.annoy_tree_file
vector_dims = arguments.vector_dims
manager = mp.Manager()
lock = manager.Lock()
def evaluate_set(prefix, tails, annoy_tree_file, vector_dims, lock, rank_threshold=100, sample_size=1000):
#fname = ''.join(annoy_tree_file)
lock.acquire()
try:
annoy_tree = AnnoyIndex(vector_dims)
annoy_tree.load(annoy_tree_file)
finally:
lock.release()
# annoy_tree = load_annoy_tree(annoy_tree_file, vector_dims)
print mp.current_process().name, id(annoy_tree), prefix.encode('utf-8')
sys.stdout.flush()
counts = dict()
counts[True] = 0
counts[False] = 0
if len(tails) > sample_size:
tails = random.sample(tails, sample_size)
for (comp1, tail1), (comp2, tail2) in itertools.combinations(tails, 2):
diff = np.array(annoy_tree.get_item_vector(comp2))- np.array(annoy_tree.get_item_vector(tail2))
predicted = np.array(annoy_tree.get_item_vector(tail1)) + diff
result = annoy_knn(annoy_tree, predicted, comp1, rank_threshold)
counts[result] += 1
        annoy_tree.unload()
return (prefix, float(counts[True]) / (counts[True] + counts[False])) if counts[True] + counts[False] > 0 else (prefix, 0.0)
print timestamp(), "evaluating candidates"
pool | = mp.Pool(processes=arguments.n_processes)
params = candidate_generator(candidates, arguments.rank_threshold, arguments.sample_set_size, annoy_tree_file, vector_dims, lock)
results = pool.map(mp_wrapper_evaluate_set, params)
| print timestamp(), "pickling"
pickle.dump(results, open(arguments.result_output_file, "wb"))
print timestamp(), "done"
|
kjs73/pele | examples/gui/bljsystem.py | Python | gpl-3.0 | 2,178 | 0.003673 | """
start a gui for a binary lennard jones cluster.
All that is really needed to start a gui is define a system and call run_gui
system = BLJCluster(natoms, ntypeA)
run_gui(system)
"""
import sys
from PyQt4 import QtGui
from pele.systems import BLJCluster
from pele.gui import run_gui
from _blj_dialog import Ui_DialogLJSetup as UI
class BLJDialog(QtGui.QDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
self.ui = UI()
self.ui.setupUi(self)
self.setWindowTitle("Create binary Lennard-Jones system")
self.natoms = None
# self.ui.buttonBox.Ok.setDefault(True)
# self.ui.buttonBox.Ok.setDefault(True)
def get_input(self):
self.natoms = int(self.ui.lineEdit_natoms.text())
self.ntypeA = int(self.ui.lineEdit_ntypeA.text())
self.sigAB = float(self.ui.lineEdi | t_sigAB.text())
self.epsAB = float(self.ui.lineEdit_epsAB.text())
self.sigBB = float(self.ui.lineEdit_sigBB.text())
self.epsBB = float(self.ui.lineEdit_epsBB.text())
| self.sigAA = 1.
self.epsAA = 1.
def on_buttonBox_accepted(self):
self.get_input()
self.close()
def on_buttonBox_rejected(self):
self.close()
if __name__ == "__main__":
# create a pop up window to get the number of atoms
app = QtGui.QApplication(sys.argv)
dialog = BLJDialog()
dialog.exec_()
if dialog.natoms is None:
sys.exit()
print dialog.ntypeA, "A atoms interacting with eps", dialog.epsAA, "sig", dialog.sigAA
print dialog.natoms - dialog.ntypeA, "B atoms interacting with eps", dialog.epsBB, "sig", dialog.sigBB
print "The A and B atoms interact with eps", dialog.epsAB, "sig", dialog.sigAB
# create the system and start the gui
# (note: since the application is already started we need to pass it to run_gui)
system = BLJCluster(dialog.natoms, dialog.ntypeA,
sigAB=dialog.sigAB,
epsAB=dialog.epsAB,
sigBB=dialog.sigBB,
epsBB=dialog.epsBB,
)
run_gui(system, application=app)
|
charlesll/RamPy | rampy/tests/test_mlregressor.py | Python | gpl-2.0 | 1,520 | 0.017763 | import unittest
import numpy as np
np.random.seed(42)
import scipy
from scipy.stats import norm
import rampy as rp
class TestML(unittest.TestCase):
def test_mlregressor(self):
x = np.arange(0,600,1.0)
nb_samples = 100 # number of samples in our dataset
# true partial spectra
S_1 = norm.pdf(x,loc=200.,scale=130.)
S_2 = norm.pdf(x,loc=400,scale=70)
S_true = np.vstack((S_1,S_2))
#60 samples with random concentrations between 0 and 1
C_ = np.random.rand(nb_samples)
C_true = np.vstack((C_,(1-C_))).T
# We make some observations with random noise
Obs = np.dot(C_true,S_true) + np.random.randn(nb_samples,len(x))*1e-4
# new observations
C_new_ = np.random.rand(10) #10 samples with random concentrations between 0 and 1
C_new_true = np.vstack((C_new_,(1- | C_new_))).T
noise_new = np.random.randn(len(x))*1e-4
Obs_new = np.dot(C_new_true,S_true) + noise_new
model = rp.mlregressor(Obs,C_true[:,0].reshape(-1,1))
for i in ["KernelRidge", "SVM", "LinearRegression", "NeuralNet", "BaggingNeuralNet"]:
# we do not test on Lasso and ElasticNet as this raises lots of warning due to convergence issues...
model.algorithm = i
model | .user_kernel = 'poly'
model.fit()
C_new_predicted = model.predict(Obs_new)
# testing if refit works
model.refit()
if __name__ == '__main__':
unittest.main()
|
sobomax/virtualbox_64bit_edd | src/VBox/ValidationKit/tests/additions/tdAddBasic1.py | Python | gpl-2.0 | 11,512 | 0.016157 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: tdAddBasic1.py $
"""
VirtualBox Validation Kit - Additions Basics #1.
"""
__copyright__ = \
"""
Copyright (C) 2010-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard Python imports.
import os;
import sys;
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testdriver import reporter;
from testdriver import base;
from testdriver import vbox;
from testdriver import vboxcon;
# Sub test driver imports.
sys.path.append(os.path.dirname(os.path.abspath(__file__))); # For sub-test drivers.
from tdAddGuestCtrl import SubTstDrvAddGuestCtrl;
class tdAddBasic1(vbox.TestDriver): # pylint: disable=R0902
"""
Additions Basics #1.
"""
## @todo
    #  - More of the settings stuff can be and needs to be generalized!
#
def __init__(self):
vbox.TestDriver.__init__(self);
self.oTestVmSet = self.oTestVmManager.getStandardVmSet('nat');
self.asTestsDef = ['guestprops', 'stdguestprops', 'guestcontrol'];
self.asTests = self.asTestsDef;
self.addSubTestDriver(SubTstDrvAddGuestCtrl(self));
#
# Overridden methods.
#
def showUsage(self):
rc = vbox.TestDriver.showUsage(self);
reporter.log('');
reporter.log('tdAddBasic1 Options:');
reporter.log(' --tests <s1[:s2[:]]>');
reporter.log(' Default: %s (all)' % (':'.join(self.asTestsDef)));
reporter.log(' --quick');
reporter.log(' Same as --virt-modes hwvirt --cpu-counts 1.');
return rc;
def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
if asArgs[iArg] == '--tests':
iArg += 1;
if iArg >= len(asArgs): raise base.InvalidOption('The "--tests" takes a colon separated list of tests');
self.asTests = asArgs[iArg].split(':');
for s in self.asTests:
if s not in self.asTestsDef:
raise base.InvalidOption('The "--tests" value "%s" is not valid; valid values are: %s' \
% (s, ' '.join(self.asTestsDef)));
elif asArgs[iArg] == '--quick':
self.parseOption(['--virt-modes', 'hwvirt'], 0);
self.parseOption(['--cpu-counts', '1'], 0);
else:
return vbox.TestDriver.parseOption(self, asArgs, iArg);
return iArg + 1;
def actionConfig(self):
if not self.importVBoxApi(): # So we can use the constant below.
return False;
eNic0AttachType = vboxcon.NetworkAttachmentType_NAT;
sGaIso = self.getGuestAdditionsIso();
return self.oTestVmSet.actionConfig(self, eNic0AttachType = eNic0AttachType, sDvdImage = sGaIso);
def actionExecute(self):
return self.oTestVmSet.actionExecute(self, self.testOneCfg);
#
# Test execution helpers.
#
def testOneCfg(self, oVM, oTestVm):
"""
Runs the specified VM thru the tests.
Returns a success indicator on the general test execution. This is not
the actual test result.
"""
fRc = False;
self.logVmInfo(oVM);
oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(oTestVm.sVmName, fCdWait = True, \
sFileCdWait = 'AUTOR | UN.INF');
if oSession is not None:
self.addTask(oSession);
# Do the testing.
reporter.testStart('Install');
fRc, oTxsSession = self.testInstallAdditions(oSession, oTxsSession, oTestVm);
reporter.testDone();
fSkip = not fRc;
reporter.testStart('Guest Properties');
if not fSkip:
fRc = self.testGuestProperties(oSession, oTxsSession, oTestVm) and fRc;
reporter. | testDone(fSkip);
reporter.testStart('Guest Control');
if not fSkip:
(fRc2, oTxsSession) = self.aoSubTstDrvs[0].testIt(oTestVm, oSession, oTxsSession);
fRc = fRc2 and fRc;
reporter.testDone(fSkip);
## @todo Save an restore test.
## @todo Reset tests.
## @todo Final test: Uninstallation.
# Cleanup.
self.removeTask(oTxsSession);
#self.terminateVmBySession(oSession)
return fRc;
def testInstallAdditions(self, oSession, oTxsSession, oTestVm):
"""
Tests installing the guest additions
"""
if oTestVm.isWindows():
fRc = self.testWindowsInstallAdditions(oSession, oTxsSession, oTestVm);
else:
reporter.error('Guest Additions installation not implemented for %s yet! (%s)' % \
(oTestVm.sKind, oTestVm.sVmName));
fRc = False;
#
# Verify installation of Guest Additions using commmon bits.
#
if fRc is True:
#
# Wait for the GAs to come up.
#
## @todo need to signed up for a OnAdditionsStateChanged and wait runlevel to
# at least reach Userland.
#
# Check if the additions are operational.
#
try: oGuest = oSession.o.console.guest;
except:
reporter.errorXcpt('Getting IGuest failed.');
return (False, oTxsSession);
# Check the additionsVersion attribute. It must not be empty.
reporter.testStart('IGuest::additionsVersion');
fRc = self.testIGuest_additionsVersion(oGuest);
reporter.testDone();
reporter.testStart('IGuest::additionsRunLevel');
self.testIGuest_additionsRunLevel(oGuest, oTestVm);
reporter.testDone();
## @todo test IAdditionsFacilities.
return (fRc, oTxsSession);
def testWindowsInstallAdditions(self, oSession, oTxsSession, oTestVm):
"""
Installs the Windows guest additions using the test execution service.
Since this involves rebooting the guest, we will have to create a new TXS session.
"""
asLogFile = [];
# Delete relevant log files.
if oTestVm.sKind in ('WindowsNT4',):
sWinDir = 'C:/WinNT/';
else:
sWinDir = 'C:/Windows/';
asLogFile = [sWinDir+'setupapi.log', sWinDir+'setupact.log', sWinDir+'setuperr.log'];
for sFile in asLogFile:
self.txsRmFile(oSession, oTxsSession, sFile);
# Install the public signing key.
if oTestVm.sKind not in ('WindowsNT4', 'Windows2000', 'WindowsXP', 'Windows2003'):
## TODO
pass;
#
# The actual install.
# Enable installing the optional auto-logon modules (VBoxGINA/VBoxCredProv) + (Direct)3D support.
# Also tell the installer to produce the appropriate log files.
#
fRc = self.txsRunTest(oTxsSession, 'VBoxWindowsAdditions.exe', 5 * 6 |
leilihh/novaha | nova/cmd/all.py | Python | apache-2.0 | 3,403 | 0.000294 | # Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for all nova services.
This script attempts to start all the nova services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import sys
from oslo.config import cfg
from nova import config
from nova.objectstore import s3server
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
CONF = cfg.CONF
CONF.import_opt('manager', 'nova.conductor.api', group='conductor')
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
config.parse_args(sys.argv)
logging.setup("nova")
LOG = logging.getLogger('nova.all')
utils.monkey_patch()
launcher = service.process_launcher()
# nova-api
for api in CONF.enabled_apis:
try:
should_use_ssl = api in CONF.enabled_ssl_apis
server = service.WSGIService(api, use_ssl=should_use_ssl)
launcher.launch_service(server, workers=server.workers or 1)
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % '%s-api' % api)
for mod in [s3server, xvp_proxy]:
try:
launcher.launch_service(mod.get_wsgi_server())
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % mod.__name__)
for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
'nova-cert', 'nova-conductor', 'nova-kvmha']:
# FIXME(sirp): Most service configs are defined in nova/service.py, but
# conductor has set a new precedent of storing these configs
# nova/<service>/api.py.
#
# We should update the existing services to use this new approach so we
# don't have to treat conductor differently here.
if binary == 'nova-conductor':
topic = CONF.conductor.topic
manager = CONF.conductor.manager
else:
topic = None
manager = None
try:
la | uncher.launch_service(service.Service.create(binary=binary,
topic=topic,
| manager=manager))
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s'), binary)
launcher.wait()
|
lisprolog/python | to_encrypt.py | Python | bsd-3-clause | 1,378 | 0.006531 | '''
This mission is part of a set. Another | one is the Caesar cipher decryptor.
Your mission is to encrypt a secret message (text only, without special chars like "!", "&", "?" etc.) using a Caesar cipher, where each letter of the inpu | t text is replaced by another that stands at a fixed distance. For example ("a b c", 3) == "d e f"
example
Input: A secret message as a string (lowercase letters only and white spaces)
Output: The same string, but encrypted
Precondition:
0 < len(text) < 50
-26 < delta < 26
'''
def to_encrypt(text, delta):
    alpha = 'abcdefghijklmnopqrstuvwxyz'
    result = ''
    for letter in text:
        index = alpha.find(letter)
        if index > -1:
            # shift within the alphabet; % 26 wraps around and also
            # handles negative deltas correctly in Python
            result = result + alpha[(index + delta) % 26]
        else:
            result = result + ' '
    return result
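# Worked example: in to_encrypt('xyz', 3), 'x' sits at index 23 and
# (23 + 3) % 26 == 0, so 'x' wraps around to 'a'; the result is 'abc'.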
if __name__ == '__main__':
print("Example:")
print(to_encrypt('abc', 10))
#These "asserts" using only for self-checking and not necessary for auto-testing
assert to_encrypt("a b c", 3) == "d e f"
assert to_encrypt("a b c", -3) == "x y z"
assert to_encrypt("simple text", 16) == "iycfbu junj"
assert to_encrypt("important text", 10) == "swzybdkxd dohd"
assert to_encrypt("state secret", -13) == "fgngr frperg"
print("Coding complete? Click 'Check' to earn cool rewards!")
|
dennybaa/st2 | st2actions/st2actions/runners/python_action_wrapper.py | Python | apache-2.0 | 6,375 | 0.002039 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import argparse
from st2common import log as logging
from st2actions import config
from st2actions.runners.pythonrunner import Action
from st2actions.runners.utils import get_logger_for_python_runner_action
from st2actions.runners.utils import get_action_class_instance
from st2common.util import loader as action_loader
from st2common.util.config_parser import ContentPackConfigParser
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common.service_setup import db_setup
from st2common.services.datastore import DatastoreService
__all__ = [
'PythonActionWrapper',
'ActionService'
]
LOG = logging.getLogger(__name__)
class ActionService(object):
"""
Instance of this class is passed to the action instance and exposes "public"
methods which can be called by the action.
"""
def __init__(self, action_wrapper):
logger = get_logger_for_python_runner_action(action_name=action_wrapper._class_name)
self._action_wrapper = action_wrapper
self._datastore_service = DatastoreService(logger=logger,
pack_name=self._action_wrapper._pack,
class_name=self._action_wrapper._class_name,
api_username='action_service')
##################################
# Methods for datastore management
##################################
def list_values(self, local=True, prefix=None):
return self._datastore_service.list_values(local, prefix)
def get_value(self, name, local=True):
return self._datastore_service.get_value(name, local)
def set_value(self, name, value, ttl=None, local=True):
return self._datastore_service.set_value(name, value, ttl, local)
def delete_value(self, name, local=True):
return self._datastore_service.delete_value(name, local)
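    # Illustrative usage from inside an action (a sketch: 'MyEchoAction' and
    # the key name are made up, and it assumes the Action base class exposes
    # this object as self.action_service):
    #
    #     class MyEchoAction(Action):
    #         def run(self):
    #             self.action_service.set_value('last_run', 'ok', ttl=3600)
    #             return self.action_service.get_value('last_run')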
class PythonActionWrapper(object):
def __init__(self, pack, file_path, parameters=None, parent_args=None):
"""
:param pack: Name of the pack this action belongs to.
:type pack: ``str``
:param file_path: Path to the action module.
:type file_path: ``str``
:param parameters: action parameters.
:type parameters: ``dict`` or ``None``
:param parent_args: Command line arguments passed to the parent process.
:type parse_args: ``list``
"""
self._pack = | pack
self._file_path = file_path
self._parameters = parameters or {}
self._parent_args = parent_args or []
self._class_name = None
self._logger = logging.getLogger('PythonActionWrapper')
try:
config.parse_args(args=self._parent_args)
except Exception:
pass
else:
db_setup()
def run(self):
action = self._get_action_insta | nce()
output = action.run(**self._parameters)
# Print output to stdout so the parent can capture it
sys.stdout.write(ACTION_OUTPUT_RESULT_DELIMITER)
print_output = None
try:
print_output = json.dumps(output)
except:
print_output = str(output)
sys.stdout.write(print_output + '\n')
sys.stdout.write(ACTION_OUTPUT_RESULT_DELIMITER)
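        # The parent runner recovers the result by splitting stdout on the
        # delimiter, so the emitted stream looks roughly like (illustrative):
        #
        #     <anything the action itself printed>
        #     <delimiter>{"some": "result"}
        #     <delimiter>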
def _get_action_instance(self):
actions_cls = action_loader.register_plugin(Action, self._file_path)
action_cls = actions_cls[0] if actions_cls and len(actions_cls) > 0 else None
if not action_cls:
raise Exception('File "%s" has no action or the file doesn\'t exist.' %
(self._file_path))
config_parser = ContentPackConfigParser(pack_name=self._pack)
config = config_parser.get_action_config(action_file_path=self._file_path)
if config:
LOG.info('Using config "%s" for action "%s"' % (config.file_path,
self._file_path))
config = config.config
else:
LOG.info('No config found for action "%s"' % (self._file_path))
config = None
action_service = ActionService(action_wrapper=self)
action_instance = get_action_class_instance(action_cls=action_cls,
config=config,
action_service=action_service)
return action_instance
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Python action runner process wrapper')
parser.add_argument('--pack', required=True,
help='Name of the pack this action belongs to')
parser.add_argument('--file-path', required=True,
help='Path to the action module')
parser.add_argument('--parameters', required=False,
help='Serialized action parameters')
parser.add_argument('--parent-args', required=False,
help='Command line arguments passed to the parent process')
args = parser.parse_args()
parameters = args.parameters
parameters = json.loads(parameters) if parameters else {}
parent_args = json.loads(args.parent_args) if args.parent_args else []
assert isinstance(parent_args, list)
obj = PythonActionWrapper(pack=args.pack,
file_path=args.file_path,
parameters=parameters,
parent_args=parent_args)
obj.run()
|
google-code/billreminder | src/lib/dbus_actions.py | Python | mit | 5,266 | 0.001899 | # -*- coding: utf-8 -*-
__all__ = ['Actions']
import dbus
import dbus.service
import os
from subprocess import Popen
import dal
import bill
from lib import common, scheduler
from lib.utils import force_string
from lib.utils import Message
from db.billstable import BillsTable
class Actions(object):
def __init__(self, databaselayer=None):
try:
session_bus = dbus.SessionBus()
obj = session_bus.get_object(common.DBUS_INTERFACE,
common.DBUS_PATH)
self.dbus_interface = dbus.Interface(obj, common.DBUS_INTERFACE)
pid = os.getpid()
print self.dbus_interface.register(pid)
except dbus.DBusException:
if Message().ShowErrorQuestion( \
_("An error occurred while connecting to BillReminder Notifier!\n"\
"Do you want to launch it and restart BillReminder?")):
Popen('python billreminderd --open-gui', shell=True)
raise SystemExit
return False
def _correct_type(self, record):
if 'Id' in record.keys():
record['Id'] = int(record['Id'])
if 'dueDate' in record.keys():
record['dueDate'] = int(record['dueDate'])
if 'amountDue' in record.keys():
record['amountDue'] = float(record['amountDue'].replace(',', '.'))
if 'paid' in record.keys():
record['paid'] = int(record['paid'])
if 'alarm' in record.keys():
record['alarm'] = int(record['alarm'])
if 'caId' in record.keys():
record['caId'] = int(record['caId'])
return record
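    # Example (illustrative): {'Id': '3', 'amountDue': '12,50', 'paid': '0'}
    # becomes {'Id': 3, 'amountDue': 12.5, 'paid': 0}; D-Bus hands values
    # back as strings, so each known field is coerced before use.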
def get_monthly_bills(self, status, month, year):
# Delimeters for our search
firstOfMonth = scheduler.first_of_month(month, year)
lastOfMonth = scheduler.last_of_month(month, year)
# Determine status criteria
status = status < 2 and ' = %s' % status or ' in (0,1)'
records = self.get_bills('paid %s' \
' and dueDate >= %s and dueDate <= %s' \
' ORDER BY dueDate DESC' % (status, firstOfMonth, lastOfMonth))
return records
def get_bills(self, kwargs):
""" Returns one or more records that meet the criteria passed """
try:
ret = []
if isinstance(kwargs, basestring):
records = self.dbus_interface.get_bills_(kwargs)
else:
records = self.dbus_interface.get_bills(force_string(kwargs))
for record in records:
record = self._correct_type(record)
ret.append(record)
return ret
except dbus.DBusException:
if self.__init__():
return self.get_bills(kwargs)
def add_bill(self, kwargs):
""" Adds a bill to the database """
try:
record = self.dbus_interface.add_bill(force_string(kwargs))
return self._correct_type(record)
except dbus.DBusException:
if self.__init__():
return self.add_bill(kwargs)
def edit_bill(self, kwargs):
""" Edit a record in the database """
try:
record = self.dbus_interface.edit_bill | (force_string(kwargs))
return self._correct_type(record)
except dbus.DBusException:
if self.__init__():
return self.edit_bill(kwargs)
def delete_bill(self, key):
""" Delete a record in the database """
try:
return self.dbus_interf | ace.delete_bill(key)
except dbus.DBusException:
if self.__init__():
                return self.delete_bill(key)
def get_categories(self, kwargs):
""" Returns one or more records that meet the criteria passed """
try:
ret = []
if isinstance(kwargs, basestring):
records = self.dbus_interface.get_categories_(kwargs)
else:
records = self.dbus_interface.get_categories(force_string(kwargs))
for record in records:
record = self._correct_type(record)
ret.append(record)
return ret
except dbus.DBusException:
if self.__init__():
return self.get_categories(kwargs)
def add_category(self, kwargs):
""" Adds a category to the database """
try:
record = self.dbus_interface.add_category(force_string(kwargs))
return self._correct_type(record)
except dbus.DBusException:
if self.__init__():
return self.add_category(kwargs)
def edit_category(self, kwargs):
""" Edit a record in the database """
try:
record = self.dbus_interface.edit_category(force_string(kwargs))
return self._correct_type(record)
except dbus.DBusException:
if self.__init__():
return self.edit_category(kwargs)
def delete_category(self, key):
""" Delete a record in the database """
try:
return self.dbus_interface.delete_category(key)
except dbus.DBusException:
if self.__init__():
                return self.delete_category(key)
|
grupoanfi/orderbook-data-analysis | ODA/test/test_mini_market.py | Python | gpl-3.0 | 11,740 | 0.001533 | __author__ = 'Math'
import unittest
from ODA.market_cols import OrderKeys, ExecutionResponse
from utils import Generear_ordenes_limit
import random
import pandas as pd
from ODA.market import Market, Request
req_ask1 = {
OrderKeys.size: 1000,
OrderKeys.direction: -1,
OrderKeys.price: 1600,
OrderKeys.id_user: 'juanqui',
OrderKeys.group: 'Bancolombia',
OrderKeys.event: 1,
OrderKeys.id_order: 1
}
req_ask2 = {
OrderKeys.size: 1000,
OrderKeys.direction: -1,
OrderKeys.price: 1500,
OrderKeys.id_user: 'juanqui',
OrderKeys.group: 'Bancolombia',
OrderKeys.event: 1,
OrderKeys.id_order: 2
}
req_bid1 = {
OrderKeys.size: 1000,
OrderKeys.direction: 1,
OrderKeys.price: 1400,
OrderKeys.id_user: 'juanqui',
OrderKeys.group: 'Bancolombia',
OrderKeys.event: 1,
OrderKeys.id_order: 3
}
req_bid2 = {
OrderKeys.size: 1000,
OrderKeys.direction: 1,
OrderKeys.price: 1300,
OrderKeys.id_user: 'juanqui',
OrderKeys.group: 'Bancolombia',
OrderKeys.event: 1,
OrderKeys.id_order: 4
}
class TestMiniMArket(unittest.TestCase):
def setUp(self):
self.my_mini_market = Market(1)
self.my_ordens = Generear_ordenes_limit()
def tearDown(self):
# df = pd.DataFrame(self.my_mini_market.e)
# #print self.my_mini_market.executed_reqs
# print df
# #print self.my_mini_market.orders_queue
pass
def test_order_ask(self):
n_orders = random.randint(1, 20)
l_res = []
self.my_ordens.agregar_ordenes_ask(n_orders)
for order in self.my_ordens.list_ordenes_ask:
l_res.append(self.my_mini_market.execute_request(Request(**order)).msg)
self.assertSetEqual(set(l_res), {ExecutionResponse.OK})
l = []
for index in range(n_orders):
for order in self.my_mini_market.orders_queue[-1]:
if order.id_order == index:
l.append(index)
self.assertEqual(l, range(n_orders))
def test_order_bid(self):
n_orders = random.randint(1, 1000)
l_ | res = []
self.my_ordens.agregar_ordenes_bid(n_orders)
for order in self.my_ordens.list_ordenes_bid:
l_res.append(self.my_mini_market.execut | e_request(Request(**order)).msg)
self.assertSetEqual(set(l_res), {ExecutionResponse.OK})
l = []
for index in range(n_orders):
for order in self.my_mini_market.orders_queue[1]:
if order.id_order == index:
l.append(index)
self.assertEqual(l, range(n_orders))
    def test_order_limits(self):
self.my_mini_market.conserv_id = False
n_orders = random.randint(1, 1000)
n_ordenes_descartadas = random.choice(
[0, random.randint(1, n_orders - 1)])
self.my_ordens.generar_ordenes_resord(n_orders, cero=(-1) * (
n_ordenes_descartadas - 1))
order_list = self.my_ordens.list_total_ordenes()
l_res = []
for order in order_list:
res = self.my_mini_market.execute_request(Request(**order))
l_res.append(res.msg)
invalid = l_res.count(ExecutionResponse.LIMITPRICEINVALID)
self.assertEqual(invalid, n_ordenes_descartadas)
l = []
orders_in_market = self.my_mini_market.orders_queue[1][:]
orders_in_market.extend(self.my_mini_market.orders_queue[-1])
orders_activ = len(self.my_mini_market.orders_queue[-1]) + len(
self.my_mini_market.orders_queue[1])
for index in range(orders_activ):
for order in orders_in_market:
if order.id_order == index:
l.append(index)
self.assertEqual(l, range(orders_activ))
def test_modification_order(self):
n_orders = random.randint(1, 1000)
n_ordenes_descartadas = 0
self.my_ordens.generar_ordenes_resord(n_orders, cero=(-1) * (
n_ordenes_descartadas - 1))
order_list = self.my_ordens.list_total_ordenes()
for order in order_list:
self.my_mini_market.execute_request(Request(**order))
random_direction = random.choice([-1, 1])
random_order = random.choice(
self.my_mini_market.orders_queue[random_direction])
old_vol = random_order.size
random_exedent = random.randint(0, old_vol)
d = random.choice([-1, 1])
random_vol = random_order.size + random_exedent * d
new_order = random_order.to_dict().copy()
new_order[OrderKeys.size] = random_vol
new_order[OrderKeys.event] = 2
res = self.my_mini_market.execute_request(Request(**new_order))
        if res.msg == ExecutionResponse.OK:
index = self.my_mini_market.orders_queue[random_direction].index(
random_order)
order = self.my_mini_market.orders_queue[random_direction][index]
self.assertEqual(order.size, random_vol)
def test_cancelation_order(self):
n_orders = random.randint(1, 1000)
n_ordenes_descartadas = 0
self.my_ordens.generar_ordenes_resord(n_orders, cero=(-1) * (
n_ordenes_descartadas - 1))
order_list = self.my_ordens.list_total_ordenes()
for order in order_list:
self.my_mini_market.execute_request(Request(**order))
random_direction = random.choice([-1, 1])
random_order = random.choice(
self.my_mini_market.orders_queue[random_direction])
new_order = random_order.to_dict().copy()
new_order[OrderKeys.event] = 3
len_before = len(self.my_mini_market.orders_queue[random_direction])
self.my_mini_market.execute_request(Request(**new_order))
len_after = len(self.my_mini_market.orders_queue[random_direction])
self.assertEqual(len_after, len_before - 1)
self.assertRaises(ValueError, self.my_mini_market.orders_queue[
random_direction].index, random_order)
def test_market_order(self):
n_orders = random.randint(1, 10)
n_ordenes_descartadas = 0
self.my_ordens.generar_ordenes_resord(n_orders, cero=(-1) * (
n_ordenes_descartadas - 1), n_price=random.randint(1, 6))
order_list = self.my_ordens.list_total_ordenes()
l_res = []
for order in order_list:
l_res.append(self.my_mini_market.execute_request(Request(**order)))
# print list_success_responses
random_direction = random.choice([-1, 1])
index_rand = random.randint(0, len(
self.my_mini_market.orders_queue[random_direction]))
vol_to_test = 0
for index in range(index_rand):
order = self.my_mini_market.orders_queue[random_direction][index]
vol_to_test += order.size
vol_excedent = random.randint(0, 9)
vol_to_test += vol_excedent
random_type = random.choice([-1, 1])
market_order = {
OrderKeys.size: vol_to_test,
OrderKeys.direction: random_direction,
OrderKeys.price: random.randint(40, 80),
OrderKeys.id_user: self.my_ordens.counter_id.count,
OrderKeys.group: "GROUP_" + str(random.randint(0, 1000000)),
OrderKeys.event: 4,
OrderKeys.type: random_type
}
last_trader = None
vol_last_trader_before = None
if index_rand < len(self.my_mini_market.orders_queue[random_direction]):
last_trader = self.my_mini_market.orders_queue[random_direction][
index_rand]
vol_last_trader_before = \
self.my_mini_market.orders_queue[random_direction][
index_rand].to_dict().copy()[OrderKeys.size]
len_before = len(self.my_mini_market.orders_queue[random_direction])
res = self.my_mini_market.execute_request(Request(**market_order))
#print res
len_after = len(self.my_mini_market.orders_queue[random_direction])
        if res.msg == ExecutionResponse.OK:
            self.assertEqual(len_before, len_after + index_rand)
suryakencana/niimanga | niimanga/sites/__init__.py | Python | lgpl-3.0 | 2,259 | 0.001771 | """
# Copyright (c) 05 2015 | surya
# 18/05/15 nanang.ask@kubuskotak.com
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# __init__.py.py
"""
import urlparse
from niimanga.libs.exceptions import HtmlError
from requests import request
class Site:
def __init__(self):
        pass
def get_html(self, url, method='GET', **kwargs):
resp = request(method, url, **kwargs)
if resp.status_code != 200:
raise HtmlError({'msg': 'external_request_fail', 'url': url})
return resp.content
def fetch_manga_seed_page(self, url, **kwargs):
return self.get_html(url, **kwargs)
    def fetch_chapter_seed_page(self, url, **kwargs):
return self.get_html(url, **kwargs)
def fetch_page_image(self, url, **kwargs):
return self.get_html(url, **kwargs)
def search_by_author(self, author):
"""
Return list of chapter dicts whose keys are:
name
url
site
This should be specifically implemented in each Site subclass. If not,
this method will be used which returns an empty list.
"""
return []
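    # Illustrative return shape for a subclass implementation (hypothetical
    # values, not produced by this base class):
    #   [{'name': 'Some Manga ch.1', 'url': 'http://example.com/ch1', 'site': 'Batoto'}]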
from mangaeden import MangaEden
from batoto import Batoto
available_sites = [
# Kissmanga(),
# Vitaku(),
Batoto(),
# Mangafox(),
# Mangahere(),
# MangaHereMob(),
MangaEden()
]
# Factory function, return instance of suitable "site" class from url
def get_site(url):
netloc = urlparse.urlparse(url).netloc
for site in available_sites:
if netloc in site.netlocs:
return site
return None |
vied12/superdesk | server/apps/validators/validators.py | Python | agpl-3.0 | 645 | 0 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
class ValidatorsResource(superdesk.Resource):
schema = {
'_id': {'type': 'string', 'required': True},
'schema': {
'type': 'dict',
'required': False
}
}
resource_methods = ['POST']
item_methods = []
class ValidatorsService(superdesk.Service):
pass
|
freedesktop-unofficial-mirror/gstreamer-sdk__cerbero | test/test_cerbero_packages_linux.py | Python | lgpl-2.1 | 7,324 | 0.000683 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import os
from cerbero.config import DEFAULT_PACKAGER
from cerbero.packages import PackageType
from cerbero.packages import linux, PackagerBase
from test.test_common import DummyConfig as Config
from test.test_packages_common import Package1, create_store
packed = []
class LoggerPackager(linux.LinuxPackager):
def pack(self, output_dir, devel, force, keep_temp, pack_deps, tmpdir):
packed.append(self.package.name)
class DummyPackager(linux.LinuxPackager):
def build(self, output_dir, tarname, tmpdir, packagedir, srcdir):
linux.LinuxPackager.build(self, output_dir, tarname, tmpdir,
packagedir, srcdir)
return ['test']
def create_tree(self, tmpdir):
linux.LinuxPackager.create_tree(self, tmpdir)
return ('', '', '')
class DummyTarballPackager(PackagerBase):
def pack(self, output_dir, devel=True, force=False, split=True,
package_prefix=''):
return ['test']
linux.DistTarball = DummyTarballPackager
class LinuxPackagesTest(unittest.TestCase):
def setUp(self):
self.config = Config()
self.store = create_store(self.config)
self.packager = linux.LinuxPackager(self.config,
self.store.get_package('gstreamer-runtime'), self.store)
def testInit(self):
config = Config()
# Test default values
package = Package1(config, None, None)
packager = linux.LinuxPackager(config, package, None)
self.assertEquals(packager.package_prefix, '')
self.assertEquals(packager.full_package_name, 'gstreamer-test1-1.0')
self.assertEquals(packager.packager, DEFAULT_PACKAGER)
# Test packages_prefix and packager
config.packages_prefix = 'test'
config.packager = 'Pin <pan@p.un>'
packager = linux.LinuxPackager(config, package, None)
self.assertEquals(packager.package_prefix, 'test-')
self.assertEquals(packager.full_package_name,
'test-gstreamer-test1-1.0')
self.assertEquals(packager.packager, 'Pin <pan@p.un>')
# Test ignore package
package.ignore_package_prefix = True
packager = linux.LinuxPackager(config, package, None)
self.assertEquals(packager.package_prefix, '')
self.assertEquals(packager.full_package_name, 'gstreamer-test1-1.0')
def testRequires(self):
self.packager._empty_packages = []
expected = sorted(['gstreamer-test-bindings',
'gstreamer-test2',
'gstreamer-test3',
'gstreamer-test1'])
requires = self.packager.get_requires(PackageType.RUNTIME, '-dev')
self.assertEquals(expected, requires)
# test devel packages
requires = self.packager.get_requires(PackageType.DEVEL, '-dev')
self.assertEquals([], requires)
self.store.get_package('gstreamer-test1').has_devel_package = True
requires = self.packager.get_requires(PackageType.DEVEL, '-dev')
self.assertEquals(['gstreamer-test1-dev'], requires)
for p in expected:
self.store.get_package(p).has_devel_package = True
requires = self.packager.get_requires(PackageType.DEVEL, '-dev')
self.assertEquals([x + '-dev' for x in expected], requires)
# test empty packages
self.packager._empty_packages = \
[self.store.get_package('gstreamer-test2')]
requires = self.packager.get_requires(PackageType.RUNTIME, '-dev')
expected.remove('gstreamer-test2')
self.assertEquals(expected, requires)
    def testMetaPackageRequires(self):
self.packager._empty_packages = []
expected = (['gstreamer-test1'],
['gstreamer-test3'],
['gstreamer-test-bindings'])
self.store.get_package('gstreamer-test1').has_runtime_package = True
self.store.get_package('gstreamer-test3').has_runtime_package = True
self.store.get_package('gstreamer-test-bindings').has_runtime_package = True
        requires = self.packager.get_meta_requires(PackageType.RUNTIME, '')
self.assertEquals(expected, requires)
# test devel packages
requires = self.packager.get_meta_requires(PackageType.DEVEL, '-dev')
self.assertEquals(([], [], []), requires)
# test empty packages
self.store.get_package('gstreamer-test1').has_devel_package = True
requires = self.packager.get_meta_requires(PackageType.DEVEL, '-dev')
self.assertEquals((['gstreamer-test1-dev'], [], []), requires)
for p in [self.store.get_package(x[0]) for x in expected]:
p.has_devel_package = True
requires = self.packager.get_meta_requires(PackageType.DEVEL, '-dev')
expected = (['gstreamer-test1-dev'],
['gstreamer-test3-dev'],
['gstreamer-test-bindings-dev'])
self.assertEquals(expected, requires)
def testPackDeps(self):
expected = sorted(['gstreamer-test-bindings',
'gstreamer-test2',
'gstreamer-test3',
'gstreamer-test1'])
self.packager = LoggerPackager(self.config,
self.store.get_package('gstreamer-runtime'), self.store)
self.packager.devel = False
self.packager.force = False
global packed
packed = []
self.packager.pack_deps('', '', True)
self.assertEquals(sorted(packed), expected)
packed = []
self.packager.devel = False
self.packager.pack_deps('', '', True)
self.assertEquals(sorted(packed), expected)
packed = []
def testPack(self):
self.packager = DummyPackager(self.config,
self.store.get_package('gstreamer-runtime'), self.store)
paths = self.packager.pack('', False, True, True, False, None)
self.assertTrue(os.path.exists('gstreamer-runtime-stamp'))
os.remove('gstreamer-runtime-stamp')
self.assertEquals(paths, ['test'])
self.packager = DummyPackager(self.config,
self.store.get_package('gstreamer-test1'), self.store)
paths = self.packager.pack('', False, True, True, False, None)
self.assertTrue(os.path.exists('gstreamer-test1-stamp'))
os.remove('gstreamer-test1-stamp')
self.assertEquals(paths, ['test'])
|
petertrotman/adventurelookup | server/adventures/models/edition.py | Python | mit | 317 | 0 | """
"""
# pylint: disable=too-few-public-methods
from django.db import models
from .mixins import TimestampMixin, DescriptionNotesMixin
class Edition(models.Model, TimestampMixin, DescriptionNotesMixin):
"""
Model for the edition.
"""
    name = models.CharField(max_length=128)
|
aparo/elasticsearch-cookbook-third-edition | chapter_16/mapping_management.py | Python | bsd-2-clause | 1,158 | 0.008636 | import elasticsearch
es = elasticsearch.Elasticsearch()
index_name = "my_index"
type_name = "my_type"
if es.indices.exists(index_name):
es.indices.delete(index_name)
es.indices.create(index_name)
es.cluster.health(wait_for_status="yellow")
es.indices.put_mapping(index=index_name, doc_type=type_name, body={type_name:{"properties": {
"uuid": {"type": "keyword", "store": "true"},
"title": {"type": "text", "store": "true", "term_ve | ctor": "with_positions_offsets"},
"parsedtext": { "type": "text", "store": "true", "term_vector": "with_positions_offsets"},
"nested": {"type": "nested", "properties": {"num": {"type": "integer", "store": "true"},
"name": {"type": "keyword", "store": "true"},
"value": {"type": "keyword", "store": "true"}}},
"date": {"type": "date", "store": "true"},
"position": {"type": "integer", "store": "true"},
"name": {"type": "text", "store": "true", "term_vector": "with_positions_offsets"}}}})
mappings = es.indices.get_mapping(index_name, type_name)
print(mappings)
es.indices.delete(index_name) |
grbd/GBD.Build.BlackJack | blackjack/cmake/vars/CMakeBehavior.py | Python | apache-2.0 | 2,553 | 0.001176 | from .types.CMakeVariable import CMakeVariable
from .types.VariableCollection import VariableCollection
class CMakeBehavior(VariableCollection):
"""CMake Behavior related variables"""
BUILD_SHARED_LIBS = ()
CMAKE_ABSOLUTE_DESTINATION_FILES = ()
CMAKE_APPBUNDLE_PATH = ()
CMAKE_AUTOMOC_RELAXED_MODE = ()
CMAKE_BACKWARDS_COMPATIBILITY = ()
CMAKE_BUILD_TYPE = ()
CMAKE_COLOR_MAKEFILE = ()
CMAKE_CONFIGURATION_TYPES = ()
CMAKE_DEBUG_TARGET_PROPERTIES = ()
CMAKE_ERROR_DEPRECATED = ()
CMAKE_ERROR_ON_ABSOLUTE_INSTALL_DESTINATION = ()
CMAKE_EXPORT_NO_PACKAGE_REGISTRY = ()
CMAKE_SYSROOT = ()
CMAKE_FIND_LIBRARY_PREFIXES = ()
CMAKE_FIND_LIBRARY_SUFFIXES = ()
CMAKE_FIND_NO_INSTALL_PREFIX = ()
CMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY = ()
CMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY = ()
CMAKE_FIND_PACKAGE_WARN_NO_MODULE = ()
CMAKE_FIND_ROOT_PATH = ()
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE = ()
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY = ()
CMAKE_FIND_ROOT_PATH_MODE_PACKAGE = ()
CMAKE_FIND_ROOT_PATH_MODE_PROGRAM = ()
CMAKE_FRAMEWORK_PATH = ()
CMAKE_IGNORE_PATH = ()
CMAKE_INCLUDE_PATH = ()
CMAKE_INCLUDE_DIRECTORIES_BEFORE = ()
CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE = ()
CMAKE_INSTALL_DEFAULT_COMPONENT_NAME = ()
CMAKE_INSTALL_MESSAGE = ()
CMAKE_INSTALL_PREFIX = ()
CMAKE_LIBRARY_PATH = ()
CMAKE_MFC_FLAG = ()
CMAKE_MODULE_PATH = ()
CMAKE_NOT_USING_CONFIG_FLAGS = ()
CMAKE_PREFIX_PATH = ()
CMAKE_PROGRAM_PATH = ()
CMAKE_SKIP_INSTALL_ALL_DEPENDENCY = ()
CMAKE_STAGING_PREFIX = ()
CMAKE_SYSTEM_IGNORE_PATH = ()
CMAKE_SYSTEM_INCLUDE_PATH = ()
CMAKE_SYSTEM_LIBRARY_PATH = ()
CMAKE_SYSTEM_PREFIX_PATH = ()
CMAKE_SYSTEM_PROGRAM_PATH = ()
CMAKE_USER_MAKE_RULES_OVERRIDE = ()
CMAKE_WARN_DEPRECATED = ()
CMAKE_WARN_ON_ABSOLUTE_INSTALL_DESTINATION = ()
@staticmethod
def CMAKE_PROJECT_PROJECT_NAME_INCLUDE(projname: str):
return CMakeVariable("CMAKE_PR | OJECT_" + projname + "_INCLUDE", projname)
@staticmethod
def CMAKE_POLICY_DEFAULT_CMP_NNNN(polnum: str):
return CMakeVariable("CMAKE_POLICY_DEFAULT_CMP" + polnum, polnum)
@staticmethod
def CMAKE_POLICY_WARNING_CMP_NNNN(polnum: str):
return CMakeVariable("CMAKE_POLICY_WARNING_CMP" + polnum, polnum)
@staticmethod
    def CMAKE_DISABLE_FIND_PACKAGE_PackageName(packagename: str):
return CMakeVariable("CMAKE_DISABLE_FIND_PACKAGE_" + packagename, packagename)
|
bdh1011/wau | venv/lib/python2.7/site-packages/twisted/test/test_log.py | Python | mit | 36,409 | 0.00195 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.log}.
"""
from __future__ import division, absolute_import, print_function
from twisted.python.compat import _PY3, NativeStringIO as StringIO
import os
import sys
import time
import logging
import warnings
import calendar
from io import IOBase
from twisted.trial import unittest
from twisted.python import log, failure
from twisted.logger.test.test_stdlib import handlerAndBytesIO
from twisted.python.log import LogPublisher
from twisted.logger import (
LoggingFile, LogLevel as NewLogLevel, LogBeginner,
LogPublisher as NewLogPublisher
)
class FakeWarning(Warning):
"""
A unique L{Warning} subclass used by tests for interactions of
L{twisted.python.log} with the L{warnings} module.
"""
class TextFromEventDictTests(unittest.SynchronousTestCase):
"""
Tests for L{textFromEventDict}.
"""
def test_message(self):
"""
The C{"message"} value, when specified, is concatenated to generate the
message.
"""
eventDict = dict(message=("a", "b", "c"))
text = log.textFromEventDict(eventDict)
self.assertEquals(text, "a b c")
def test_format(self):
"""
The C{"format"} value, when specified, is used to format the message.
"""
eventDict = dict(
message=(), isError=0, format="Hello, %(foo)s!", foo="dude"
)
text = log.textFromEventDict(eventDict)
self.assertEquals(text, "Hello, dude!")
def test_noMessageNoFormat(self):
"""
If C{"format"} is unspecified and C{"message"} is empty, return
        C{None}.
"""
        eventDict = dict(message=(), isError=0)
text = log.textFromEventDict(eventDict)
self.assertIdentical(text, None)
def test_whySpecified(self):
"""
The C{"why"} value, when specified, is first part of message.
"""
try:
raise RuntimeError()
except:
eventDict = dict(
message=(), isError=1, failure=failure.Failure(), why="foo"
)
text = log.textFromEventDict(eventDict)
self.assertTrue(text.startswith("foo\n"))
def test_whyDefault(self):
"""
The C{"why"} value, when unspecified, defaults to C{"Unhandled Error"}.
"""
try:
raise RuntimeError()
except:
eventDict = dict(message=(), isError=1, failure=failure.Failure())
text = log.textFromEventDict(eventDict)
self.assertTrue(text.startswith("Unhandled Error\n"))
def test_noTracebackForYou(self):
"""
If unable to obtain a traceback due to an exception, catch it and note
the error.
"""
# Invalid failure object doesn't implement .getTraceback()
eventDict = dict(message=(), isError=1, failure=object())
text = log.textFromEventDict(eventDict)
self.assertIn("\n(unable to obtain traceback)", text)
class LogTests(unittest.SynchronousTestCase):
def setUp(self):
self.catcher = []
self.observer = self.catcher.append
log.addObserver(self.observer)
self.addCleanup(log.removeObserver, self.observer)
def testObservation(self):
catcher = self.catcher
log.msg("test", testShouldCatch=True)
i = catcher.pop()
self.assertEqual(i["message"][0], "test")
self.assertEqual(i["testShouldCatch"], True)
self.assertIn("time", i)
self.assertEqual(len(catcher), 0)
def testContext(self):
catcher = self.catcher
log.callWithContext({"subsystem": "not the default",
"subsubsystem": "a",
"other": "c"},
log.callWithContext,
{"subsubsystem": "b"}, log.msg, "foo", other="d")
i = catcher.pop()
self.assertEqual(i['subsubsystem'], 'b')
self.assertEqual(i['subsystem'], 'not the default')
self.assertEqual(i['other'], 'd')
self.assertEqual(i['message'][0], 'foo')
def testErrors(self):
for e, ig in [("hello world", "hello world"),
(KeyError(), KeyError),
(failure.Failure(RuntimeError()), RuntimeError)]:
log.err(e)
i = self.catcher.pop()
self.assertEqual(i['isError'], 1)
self.flushLoggedErrors(ig)
def testErrorsWithWhy(self):
for e, ig in [("hello world", "hello world"),
(KeyError(), KeyError),
(failure.Failure(RuntimeError()), RuntimeError)]:
log.err(e, 'foobar')
i = self.catcher.pop()
self.assertEqual(i['isError'], 1)
self.assertEqual(i['why'], 'foobar')
self.flushLoggedErrors(ig)
def test_erroneousErrors(self):
"""
Exceptions raised by log observers are logged but the observer which
raised the exception remains registered with the publisher. These
exceptions do not prevent the event from being sent to other observers
registered with the publisher.
"""
L1 = []
L2 = []
def broken(event):
1 // 0
for observer in [L1.append, broken, L2.append]:
log.addObserver(observer)
self.addCleanup(log.removeObserver, observer)
for i in range(3):
# Reset the lists for simpler comparison.
L1[:] = []
L2[:] = []
# Send out the event which will break one of the observers.
log.msg("Howdy, y'all.", log_trace=[])
# The broken observer should have caused this to be logged.
excs = self.flushLoggedErrors(ZeroDivisionError)
del self.catcher[:]
self.assertEqual(len(excs), 1)
# Both other observers should have seen the message.
self.assertEqual(len(L1), 2)
self.assertEqual(len(L2), 2)
# The first event is delivered to all observers; then, errors
# are delivered.
self.assertEqual(L1[0]['message'], ("Howdy, y'all.",))
self.assertEqual(L2[0]['message'], ("Howdy, y'all.",))
def test_showwarning(self):
"""
L{twisted.python.log.showwarning} emits the warning as a message
to the Twisted logging system.
"""
publisher = log.LogPublisher()
publisher.addObserver(self.observer)
publisher.showwarning(
FakeWarning("unique warning message"), FakeWarning,
"warning-filename.py", 27)
event = self.catcher.pop()
self.assertEqual(
event['format'] % event,
'warning-filename.py:27: twisted.test.test_log.FakeWarning: '
'unique warning message')
self.assertEqual(self.catcher, [])
# Python 2.6 requires that any function used to override the
# warnings.showwarning API accept a "line" parameter or a
# deprecation warning is emitted.
publisher.showwarning(
FakeWarning("unique warning message"), FakeWarning,
"warning-filename.py", 27, line=object())
event = self.catcher.pop()
self.assertEqual(
event['format'] % event,
'warning-filename.py:27: twisted.test.test_log.FakeWarning: '
'unique warning message')
self.assertEqual(self.catcher, [])
def test_warningToFile(self):
"""
L{twisted.python.log.showwarning} passes warnings with an explicit file
target on to the underlying Python warning system.
"""
message = "another unique message"
category = FakeWarning
filename = "warning-filename.py"
lineno = 31
output = StringIO()
log.showwarning(message, category, filename, lineno, file=output)
self.assertEqual(
output.getvalue(),
warnings.formatwarning(message, category, filename, lineno))
# In Python 2.6, warnin |
tswsl1989/powerline-shell | segments/newline.py | Python | mit | 103 | 0 | def add_newline_segment(powerline):
    powerline.append("\n", Color.RESET, Color.RESET, separator='')
HelioGuilherme66/robotframework-selenium2library | src/Selenium2Library/utils/events/event.py | Python | apache-2.0 | 140 | 0 | from builtins import object
import abc
class Event(object):
@abc.abstractmethod
def trigger(self, *args, **kwargs):
pass
|
to-bee/members_python | web/authentication/admin.py | Python | lgpl-3.0 | 821 | 0.002436 | import django
from django.contrib import admin
from authentication.models import LoginUser, Group
@admin.register(LoginUser)
class AdminLoginUser(admin.ModelAdmin):
    list_display = ('username', 'email', 'get_orgs')
search_fields = [
'username', 'email'
]
def get_orgs(self, obj):
return obj.orgs_str
get_orgs.short_description = 'Organization'
def get_form(self, request, obj=None, **kwargs):
# self.fields = []
# self.readonly_fields = []
self.exclude = []
self.exclude.append('last_login')
# self.exclude.append('password')
        self.exclude.append('user_permissions')
return super(AdminLoginUser, self).get_form(request, obj, **kwargs)
admin.site.register(Group)
admin.site.unregister(django.contrib.auth.models.Group) |
Mariaanisimova/pythonintask | INBa/2015/Shemenev_A_V/task_100_30.py | Python | apache-2.0 | 3,036 | 0.064657 | # Task No. 10, Variant 30
# Write a "Character generator" program for a game. The user is given 30 points
# to distribute between four attributes: Strength, Health, Wisdom and Agility.
# The user must be able not only to take points from the common "pool", but also
# to return points to it from attributes he decided to assign other values.
# Shemenev A.V.
# 28.04.2016
print("""
Welcome to the "Character generator".
You can distribute 30 points between 4 attributes:
Strength, Health, Wisdom and Agility. You can both take points from the common
pool and return them to it. Distribute the attributes wisely. Good luck!
""")
STR = 0
HP = 0
INT = 0
AGL = 0
point = 30
number = 0
print("To change Strength, type 'Strength'. For Health, type 'Health'. For Wisdom, type 'Wisdom'. For Agility, type 'Agility'.")
while True:
    if STR < 0 or HP < 0 or INT < 0 or AGL < 0 or point > 30:
        print("Error")
        break
        #number = int(input("Type again"))
    elif point == 0:
        print("You have distributed all points. Their distribution:\nStrength:", STR, "\nHealth:", HP, "\nWisdom:", INT, "\nAgility:", AGL)
        break
    print("Your points:\nStrength:", STR, "\nHealth:", HP, "\nWisdom:", INT, "\nAgility:", AGL, "\nUndistributed points:", point)
    user_input = input("")
    if user_input == "Strength":
        number = int(input("How many points do you want to add (or take away)?"))
        if number <= point:
            STR += number
            point -= number
        else:
            print('Too many')
    elif user_input == "Health":
        number = int(input("How many points do you want to add (or take away)?"))
        if number <= point:
            HP += number
            point -= number
        else:
            print('Too many')
    elif user_input == "Wisdom":
        number = int(input("How many points do you want to add (or take away)?"))
        if number <= point:
            INT += number
            point -= number
        else:
            print('Too many')
    elif user_input == "Agility":
        number = int(input("How many points do you want to add (or take away)?"))
        if number <= point:
            AGL += number
            point -= number
        else:
            print('Too many')
input("Press Enter to exit.")
|
superdesk/Live-Blog | plugins/superdesk-person/__plugin__/superdesk_person/gui.py | Python | agpl-3.0 | 429 | 0.004662 | '''
Created on Feb 2, 2012
@package ally core request
@copyright 2011 Sourcefabric o.p.s.
@license http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Contains the GUI configuration setup for the node presenter plugin.
'''
from ..gui_core.gui_core import publishGui, publish
# --------------------------------------------------------------------
@publish
def publishJS():
publishGui('superdesk/person')
|
devilry/devilry-django | devilry/devilry_admin/views/common/bulkimport_users_common.py | Python | bsd-3-clause | 5,904 | 0.001863 | import re
from crispy_forms import layout
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import gettext_lazy
from cradmin_legacy.crispylayouts import PrimarySubmit
from cradmin_legacy.viewhelpers import formbase
from devilry.devilry_account.models import PermissionGroup
class AbstractTypeInUsersView(formbase.FormView):
users_blob_split_pattern = re.compile(r'[,;\s]+')
create_button_label = gettext_lazy('Save')
template_name = 'devilry_admin/common/abstract-type-in-users.django.html'
def dispatch(self, request, *args, **kwargs):
requestuser_devilryrole = request.cradmin_instance.get_devilryrole_for_requestuser()
if requestuser_devilryrole != PermissionGroup.GROUPTYPE_DEPARTMENTADMIN:
raise Http404()
return super(AbstractTypeInUsersView, self).dispatch(request=request, *args, **kwargs)
def get_backlink_url(self):
raise NotImplementedError()
def get_backlink_label(self):
raise NotImplementedError()
@classmethod
def split_users_blob(cls, users_blob):
"""
Split the given string of users by ``,`` and whitespace.
Returns a set.
"""
users_blob_split = cls.users_blob_split_pattern.split(users_blob)
if len(users_blob_split) == 0:
return []
if users_blob_split[0] == '':
del users_blob_split[0]
if len(users_blob_split) > 0 and users_blob_split[-1] == '':
del users_blob_split[-1]
return set(users_blob_split)
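    # Illustrative behaviour of split_users_blob (hypothetical input):
    #   'jane@example.com, john@example.com;\nbob@example.com'
    #   -> {'jane@example.com', 'john@example.com', 'bob@example.com'}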
def __get_users_blob_help_text(self):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
return gettext_lazy('Type or paste in email addresses separated by comma (","), space or one user on each line.')
else:
return gettext_lazy('Type or paste in usernames separated by comma (","), space or one user on each line.')
def __get_users_blob_placeholder(self):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
return gettext_lazy('jane@example.com\njohn@example.com')
else:
return gettext_lazy('jane\njohn')
def get_form_class(self):
users_blob_help_text = self.__get_users_blob_help_text()
class UserImportForm(forms.Form):
users_blob = forms.CharField(
widget=forms.Textarea,
required=True,
help_text=users_blob_help_text
)
def __validate_users_blob_emails(self, emails):
invalid_emails = []
for email in emails:
try:
validate_email(email)
except ValidationError:
invalid_emails.append(email)
if invalid_emails:
self.add_error(
'users_blob',
gettext_lazy('Invalid email addresses: %(emails)s') % {
'emails': ', '.join(sorted(invalid_emails))
}
)
def __validate_users_blob_usernames(self, usernames):
valid_username_pattern = re.compile(
getattr(settings, 'DEVILRY_VALID_USERNAME_PATTERN', r'^[a-z0-9]+$'))
invalid_usernames = []
for username in usernames:
if not valid_username_pattern.match(username):
invalid_usernames.append(username)
if invalid_usernames:
self.add_error(
'users_blob',
gettext_lazy('Invalid usernames: %(usernames)s') % {
'usernames': ', '.join(sorted(invalid_usernames))
}
)
def clean(self):
cleaned_data = super(UserImportForm, self).clean()
users_blob = cleaned_data.get('users_blob', None)
if users_blob:
users = AbstractTypeInUsersView.split_users_blob(users_blob)
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
self.__validate_users_blob_emails(emails=users)
else:
self.__validate_users_blob_usernames(usernames=users)
self.cleaned_users_set = users
return UserImportForm
def get_field_layout(self):
return [
layout.Div(
layout.Field('users_blob', placeholder=self.__get_users_blob_placeholder()),
css_class='cradmin-globalfields cradmin-legacy-formfield-label-sr-only')
]
def get_buttons(self):
return [
PrimarySubmit('save', self.create_button_label),
]
def get_success_url(self):
        return self.request.cradmin_app.reverse_appindexurl()
    def import_users_from_emails(self, emails):
raise NotImplementedError()
def import_users_from_usernames(self, usernames):
raise NotImplementedError()
def form_valid(self, form):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
self.import_users_from_emails(emails=form.cleaned_users_set)
else:
self.import_users_from_usernames(usernames=form.cleaned_users_set)
return HttpResponseRedirect(str(self.get_success_url()))
def get_context_data(self, **kwargs):
context = super(AbstractTypeInUsersView, self).get_context_data(**kwargs)
context['backlink_url'] = self.get_backlink_url()
context['backlink_label'] = self.get_backlink_label()
context['uses_email_auth_backend'] = settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND
return context
|
jandebleser/django-wiki | src/wiki/views/article.py | Python | gpl-3.0 | 34,566 | 0.000521 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import difflib
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.base import RedirectView, TemplateView, View
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from six.moves import range
from wiki import editors, forms, models
from wiki.conf import settings
from wiki.core import permissions
from wiki.core.diff import simple_merge
from wiki.core.exceptions import NoRootURL
from wiki.core.plugins import registry as plugin_registry
from wiki.core.utils import object_to_json_response
from wiki.decorators import get_article
from wiki.views.mixins import ArticleMixin
log = logging.getLogger(__name__)
class ArticleView(ArticleMixin, TemplateView):
template_name = "wiki/view.html"
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, *args, **kwargs):
return super(
ArticleView,
self).dispatch(
request,
article,
*args,
**kwargs)
def get_context_data(self, **kwargs):
kwargs['selected_tab'] = 'view'
return ArticleMixin.get_context_data(self, **kwargs)
class Create(FormView, ArticleMixin):
form_class = forms.CreateForm
template_name = "wiki/create.html"
@method_decorator(get_article(can_write=True, can_create=True))
def dispatch(self, request, article, *args, **kwargs):
return super(Create, self).dispatch(request, article, *args, **kwargs)
def get_form(self, form_class=None):
"""
Returns an instance of the form to be used in this view.
"""
if form_class is None:
form_class = self.get_form_class()
kwargs = self.get_form_kwargs()
initial = kwargs.get('initial', {})
initial['slug'] = self.request.GET.get('slug', None)
kwargs['initial'] = initial
form = form_class(self.request, self.urlpath, **kwargs)
form.fields['slug'].widget = forms.TextInputPrepend(
prepend='/' + self.urlpath.path,
attrs={
# Make patterns force lowercase if we are case insensitive to bless the user with a
# bit of strictness, anyways
'pattern': '[a-z0-9_-]+' if not settings.URL_CASE_SENSITIVE else '[a-zA-Z0-9_-]+',
'title': 'Lowercase letters, numbers, hyphens and underscores' if not settings.URL_CASE_SENSITIVE else 'Letters, numbers, hyphens and underscores',
}
)
return form
def form_valid(self, form):
user = None
ip_address = None
if not self.request.user.is_anonymous():
user = self.request.user
if settings.LOG_IPS_USERS:
ip_address = self.request.META.get('REMOTE_ADDR', None)
elif settings.LOG_IPS_ANONYMOUS:
ip_address = self.request.META.get('REMOTE_ADDR', None)
try:
self.newpath = models.URLPath.create_article(
self.urlpath,
form.cleaned_data['slug'],
title=form.cleaned_data['title'],
content=form.cleaned_data['content'],
user_message=form.cleaned_data['summary'],
user=user,
ip_address=ip_address,
article_kwargs={'owner': user,
'group': self.article.group,
'group_read': self.article.group_read,
'group_write': self.article.group_write,
'other_read': self.article.other_read,
'other_write': self.article.other_write,
})
messages.success(
self.request,
_("New article '%s' created.") %
self.newpath.article.current_revision.title)
# TODO: Handle individual exceptions better and give good feedback.
except Exception as e:
log.exception("Exception creating article.")
if self.request.user.is_superuser:
messages.error(
self.request,
_("There was an error creating this article: %s") %
str(e))
else:
messages.error(
self.request,
_("There was an error creating this article."))
return redirect('wiki:get', '')
url = self.get_success_url()
return url
def get_success_url(self):
return redirect('wiki:get', self.newpath.path)
def get_context_data(self, **kwargs):
c = ArticleMixin.get_context_data(self, **kwargs)
# Needed since Django 1.9 because get_context_data is no longer called
# with the form instance
if 'form' not in c:
c['form'] = self.get_form()
c['parent_urlpath'] = self.urlpath
c['parent_article'] = self.article
c['create_form'] = c.pop('form', None)
c['editor'] = editors.getEditor()
return c
class Delete(FormView, ArticleMixin):
form_class = forms.DeleteForm
template_name = "wiki/delete.html"
@method_decorator(
get_article(
can_write=True,
not_locked=True,
can_delete=True))
def dispatch(self, request, article, *args, **kwargs):
return self.dispatch1(request, article, *args, **kwargs)
def dispatch1(self, request, article, *args, **kwargs):
"""Deleted view needs to access this method without a decorator,
therefore it is separate."""
urlpath = kwargs.get('urlpath', None)
# Where to go after deletion...
self.next = ""
self.cannot_delete_root = False
if urlpath and urlpath.parent:
self.next = reverse(
'wiki:get',
kwargs={
'path': urlpath.parent.path})
elif urlpath:
# We are a urlpath with no parent. This is the root
self.cannot_delete_root = True
else:
# We have no urlpath. Get it if a urlpath exists
for art_obj in article.articleforobject_set.filter(is_mptt=True):
if art_obj.content_object.parent:
self.next = reverse(
'wiki:get', kwargs={
'article_id': art_obj.content_object.parent.article.id})
else:
self.cannot_delete_root = True
return super(Delete, self).dispatch(request, article, *args, **kwargs)
def get_initial(self):
return {'revision': self.article.current_revision}
def get_form(self, form_class=None):
form = super(Delete, self).get_form(form_class=form_class)
if self.article.can_moderate(self.request.user):
form.fields['purge'].widget = forms.forms.CheckboxInput()
return form
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs['article'] = self.article
kwargs['has_children'] = bool(self.children_slice)
return kwargs
def form_valid(self, form):
cd = form.cleaned_data
purge = cd['purge']
# If we are purging, only moderators can delete articles with children
cannot_delete_children = False
can_moderate = self.article.can_moderate(self.request.user)
if purge and self.children_slice and not can_moderate:
cannot_delete_children = True
if self.cannot_delete_root or cannot_delete_children:
messages.error(
self.request,
_('This article cannot be deleted because it has children or is a root article.'))
            return redirect('wiki:get', article_id=self.article.id)
abarisain/mopidy | mopidy/utils/encoding.py | Python | apache-2.0 | 217 | 0 | from __future__ import unicode_literals
import locale
def locale_decode(bytestr):
try:
return unicode(bytestr)
    except UnicodeError:
        return str(bytestr).decode(locale.getpreferredencoding())
|
keenerd/wtf | wikipedia.py | Python | bsd-3-clause | 2,140 | 0.009841 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# https://en.wikipedia.org/w/index.php?title=List_of_computing_and_IT_abbreviations&action=edit
import re, urllib2
from collections import defaultdict
from BeautifulSoup import BeautifulSoup
pull = lambda url: urllib2.urlopen(urllib2.Request(url))
wikip = lambda article: pull('https://en.wikipedia.org/w/index.php?title=%s&action=edit' % article)
# todo: List_of_file_formats
def stock():
ad = defaultdict(list)
for line in open('acronyms'):
if '\t' not in line:
continue
line = line.strip()
a,d = line.split('\t')
ad[a].append(d)
for line in open('acronyms.comp'):
if '\t' not in line:
continue
line = line.strip()
a,d = line.split('\t')
ad[a].append(d)
return ad
def exists(key, value, lut):
key = key.upper()
if key not in lut:
return False
value = value.upper()
return any(v.upper()==value for v in lut[key])
def computing_abbrev():
"This parser is very brittle, but the input is very well formed"
wikip = open # uncomment for local debug
html = wikip('List_of_computing_and_IT_abbreviations').read()
soup = BeautifulSoup(html)
text = soup.textarea.contents[0]
ad = defaultdict(list)
for pair in re.findall('\* \[\[.*—.*', str(text)):
try:
a,_,d = pair.partition('—')
a = a[4:].rpartition('|')[-1].replace(']]', '')
d = d.replace('[[', '').replace(']]', '').replace('—', ' - ')
ad[a].append(d.strip())
except:
#print 'failed on', pair
continue
return ad
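# The wiki source bullet lines matched by the regex above look like this
# (illustrative entry):
#   * [[Asymmetric digital subscriber line|ADSL]] — Asymmetric Digital Subscriber Line
# which parses into roughly ad['ADSL'] = ['Asymmetric Digital Subscriber Line'].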
def main():
"build all the new lists"
# okay, there is just the one for now
ad = computing_abbrev()
stk = stock()
tech = open('acronyms.computing', 'w')
tech.write('$ArchLinux: wikipedia computer abbrevs 2018-05-31\n\n')
for a,ds in sorted(ad.items()):
for d in ds:
if exists(a, d, stk):
continue
tech.write('%s\t%s\n'% (a.upper(), d))
tech.close()
if __name__ == '__main__':
main()
|
andrewsosa/hackfsu_com | api/api/models/__init__.py | Python | apache-2.0 | 1,488 | 0 | """
The Django User class is used to handle users and authentication.
https://docs.djangoproject.com/en/1.10/ref/contrib/auth/
User groups:
    superadmin - Can access django admin page
admin - Can access regular admin pages
hacker - Hacker pages
mentor - Mentor pages
judge - Judge pages
user (implied when logged in) - User pages
"""
from .hackathon import Hackathon
from .hackathon_countdown import HackathonCountdown
from .hackathon_map import HackathonMap
from .hackathon_sponsor import HackathonSponsor
from .hackathon_update import HackathonUpdate
from .attendee_status import AttendeeStatus
from .schedule_item import ScheduleItem
from .school import School
from .anon_stat import AnonStat
from .scan_event import ScanEvent
from .scan_record import ScanRecord
from .user_info import UserInfo
from .hacker_info import HackerInfo
from .judge_info import JudgeInfo
from .mentor_info import MentorInfo
from .organizer_info import OrganizerInfo
from .help_request import HelpRequest
from .subscriber import Subscriber
from .wifi_cred import WifiCred
from .link_key import LinkKey
from .hackathon_prize import HackathonPrize
from .judging_expo import JudgingExpo
from .judging_criteria import JudgingCriteria
from .hack import Hack
from .judging_grade import JudgingGrade
from .judging_assignment import JudgingAssignment
from .preview_email import PreviewEmail
from .nomination import Nomination
# TODO OldParseUser
# TODO OldParseHacker
# TODO OldParseMentor
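# Added sketch (not part of the original module): membership in the user groups
# listed in the module docstring is checked through Django's standard auth API.
def _user_in_group(user, group_name):
    """Return True if ``user`` belongs to the named group (e.g. 'judge')."""
    return user.groups.filter(name=group_name).exists()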
|
Dallinger/Dallinger | demos/dlgr/demos/bartlett1932/models.py | Python | mit | 758 | 0 | from dallinger.nodes import Source
import random
class WarOfTheGhostsSource(Source):
"""A Source that reads in a random story from a file and transmit | s it."""
__mapper_args__ = {"polymorphic_identity": "war_of_the_ghosts_source"}
def _contents(self):
"""Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents().
"""
        stories = [
"ghosts.md",
"cricket.md",
"moochi.md",
"outwit.md",
"raid.md",
"species.md",
"tennis.md",
"vagabond.md",
]
story = random.choice(stories)
with open("static/stimuli/{}".format(story), "r") as f:
return f.read()
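# Note (added): per the docstring chain above, callers never invoke _contents()
# directly; a typical flow is source.transmit(), after which the framework
# resolves _what() -> create_information() -> _contents() to pick a story file.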
|
tmct/adventOfCode2016 | problems/1/Position.py | Python | mit | 514 | 0.001946 | from Direction import Direction
class Position:
def __init__(self, direction=Direction.north):
self.x_coord = 0
self.y_coord = 0
self.direction = direction
def turn(self, turn):
self.direction = self.direction.turn(turn)
def walk_forward(self, steps):
self.x_coord += steps * Direction.get_i_component(self.direction)
self.y_coord += steps * Direction.get_j_component(self.direction)
def location(self):
        return self.x_coord, self.y_coord
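
# Added usage sketch (hypothetical; assumes north is the +j axis as implied by
# the component helpers above):
if __name__ == '__main__':
    pos = Position()
    pos.walk_forward(5)      # head north five blocks
    print(pos.location())    # -> (0, 5)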
ledeprogram/algorithms | class1/homework/born_mathias_1_1.py | Python | gpl-3.0 | 276 | 0.07971 | def my_mean(list):
length = count_length(list)
sum = 0
for i in list:
sum = sum + i
return sum / length
def count_length(list):
count = 0
for i in list:
if i == '':
break
else:
            count += 1
return count
numbers = [1,2,3,4,5,6,7,8,9]
print(my_mean(numbers))
|
dlutxx/memo | python/advanced.py | Python | mit | 1,657 | 0.001811 | #-*- coding: utf8 -*-
'''
Examples of advanced Python features:
- metaclass
- descriptor
- generator/forloop
'''
from __future__ import print_function
import sys
if sys.version_info > (3, ): # Python 3
exec('''
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
''')
exec_in('''
def with_meta(cls):
class Meta(metaclass=cls):
pass
return Meta
''', globals())
else:
exec('''
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '', 'exec', dont_inherit=True)
exec code in glob, loc
''')
exec_in('''
def with_meta(cls):
class Meta(object):
__metaclass__ = cls
pass
return Meta
''', globals())
class AnimalMeta(type):
species = 0
def __new__(cls, name, bases, attrs):
if not name == 'Meta':
cls.species += 1
print(
'First, metaclass.__new__ received (metaclass, name, bases, attrs)')
print(cls, name, bases, attrs)
return super(AnimalMeta, cls).__new__(cls, name, bases, attrs)
def __init__(self, name, bases, attrs):
if not name == 'Meta':
print(
'Second, metaclass.__init__ received (self, name, bases, attrs)')
print(self, name, bases, attrs)
def __call__(self, *args, **kwargs):
        print("AnimalMeta.__call__")
return super(AnimalMeta, self).__call__(*args, **kwargs)
class Cat(with_meta(AnimalMeta)):
name = 'cat'
def __init__(self):
print('Meow')
kit = Cat()
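
# --- Added sketches (not in the original memo) for the two other features the
# --- module docstring names: descriptor and generator/forloop.
class Upper(object):
    """A minimal descriptor: upper-cases the wrapped text on attribute access."""
    def __init__(self, text):
        self.text = text

    def __get__(self, obj, objtype=None):
        return self.text.upper()


class Dog(object):
    noise = Upper('woof')


def countdown(n):
    """A generator: a for-loop calls next() on it until StopIteration."""
    while n > 0:
        yield n
        n -= 1


print(Dog().noise)                # WOOF
print([i for i in countdown(3)])  # [3, 2, 1]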
|
teknogods/eaEmu | eaEmu/gamespy/auth.py | Python | gpl-3.0 | 944 | 0.023305 | from __future__ import absolute_import
import struct
import logging
from twisted.internet.protocol import Protocol, ServerFactory
from .. import util
class GamespyAuth(Protocol):
def connectionMade(self):
self.log = util.getLogger('gamespy.auth', self)
def dataReceived(self, data):
hdrFmt = '!4s4sL'
hLen = struct.calcsize(hdrFmt)
lgr, err, length = struct.unpack(hdrFmt, data[:hLen])
data = data[hLen:]
self.log.debug('received: {0}'.format(repr(data)))
#HACKy handling for quick and dirty impl.
if data.startswith('STR=00000000'):
# initial message.
# no body to the response.
msg = struct.pack(hdrFmt, lgr, '\x00'*4, hLen)
self.log.debug('sending: %s', repr(msg))
self.transport.write(msg)
elif data.startswith('STR'):
            self.transport.loseConnection()
class GamespyAuthFactory(ServerFactory):
protocol = GamespyAuth
|
d3adc0d3/simple-db-app | query_result_window.py | Python | mit | 1,046 | 0.027184 | from PyQt4.QtGui import *
class QueryResultWindow:
def __init__(self):
self.__create_window_widget()
self.__create_ui()
def __create_window_widget(self):
self.__qt_widget_object = QWidget()
self.__qt_widget_object.resize(800, 600)
        self.__qt_widget_object.setWindowTitle('Query Result')
def __create_ui(self):
query_result_table = QTableWidget()
layout = QVBoxLayout()
        self.__qt_widget_object.setLayout(layout)
layout.addWidget(query_result_table)
self.__query_result_table = query_result_table
def show(self):
self.__qt_widget_object.show()
    def set_column_names(self, columns):
self.__query_result_table.setColumnCount(len(columns))
self.__query_result_table.setHorizontalHeaderLabels(columns)
def set_data(self, data):
self.__query_result_table.setRowCount(len(data))
for i, row in enumerate(data):
for j in range(0, len(row)):
item_value = row[j]
item_text = str(item_value)
item = QTableWidgetItem(item_text)
self.__query_result_table.setItem(i, j, item)
|
aviarypl/mozilla-l10n-addons-server | src/olympia/addons/management/commands/approve_addons.py | Python | bsd-3-clause | 3,058 | 0 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.utils import chunked
from olympia.reviewers.utils import ReviewHelper
log = olympia.core.logger.getLogger('z.addons')
class Command(BaseCommand):
help = u'Approve a list of add-ons, given their GUIDs.'
def add_arguments(self, parser):
parser.add_argument('addon_guid', nargs='+')
def handle(self, *args, **options):
confirm = input(
u'Are you sure you want to bulk approve and sign all those {0} '
u'addons? (yes/no)'.format(len(args)))
if confirm != 'yes':
raise CommandError(u'Aborted.')
for chunk in chunked(options['addon_guid'], 100):
files = get_files(chunk)
log.info(u'Bulk approving chunk of %s files', len(files))
files_with_review_type = [
(file_, get_review_type(file_)) for file_ in files]
approve_files(files_with_review_type)
def get_files(addon_guids):
"""Return the list of files that need approval, given a list of GUIDs.
A file needs approval if it's unreviewed.
"""
# Get all the add-ons that have a GUID from the list, and which are either
# reviewed or awaiting a review.
addons = Addon.objects.filter(
guid__in=addon_guids,
status__in=amo.VALID_ADDON_STATUSES)
# Of all those add-ons, we return the list of latest version files that are
# under review, or add-ons that are under review.
files = []
for addon in addons:
files += addon.find_latest_version(
amo.RELEASE_CHANNEL_LISTED).unreviewed_files
return files
def approve_files(files_with_review_type):
"""Approve the files waiting for review (and sign them)."""
for file_, review_type in files_with_review_type:
version = file_.version
addon = version.addon
        helper = ReviewHelper(request=None, addon=addon,
version=file_.version)
# Provide the file to review/sign to the helper.
helper.set_data({'addon_files': [file_],
'comments': u'bulk approval'})
if review_type == 'full':
# Already approved, or waiting for a full review.
            helper.handler.process_public()
log.info(u'File %s (addon %s) approved', file_.pk, addon.pk)
else:
log.info(u'File %s (addon %s) not approved: '
u'addon status: %s, file status: %s',
file_.pk, addon.pk, addon.status, file_.status)
def get_review_type(file_):
"""Return 'full' or None depending on the file/addon status."""
addon_status = file_.version.addon.status
if addon_status == amo.STATUS_NOMINATED or (
addon_status == amo.STATUS_PUBLIC and
file_.status == amo.STATUS_AWAITING_REVIEW):
# Add-on or file is waiting for a full review.
return 'full'
|
leovoel/glc.py | examples/gradient.py | Python | mit | 412 | 0.004854 | from example_util import get_filename
from glc import Gif
from glc.color import Color, hsva
with Gif(get_filename(__file__)) as a:
a.set_bg_color(Color("black")).set_duration(2).set_size(300, 300)
l = a.render_list
res = 250
c = [hsva((i / res) * 360, 1, 1) for i in range(res)]
    l.gradient_pie(x=a.w * 0.5, y=a.h * 0.5, rotation=[0, 90], rx=a.w * 0.5, ry=a.h * 0.5, colors=c)
a.save()
|
TamiaLab/carnetdumaker | apps/bugtracker/forms.py | Python | agpl-3.0 | 4,597 | 0.002393 | """
Forms for the bug tracker app.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from apps.txtrender.forms import MarkupCharField
from apps.contentreport.forms import ContentReportCreationForm
from apps.tools.http_utils import get_client_ip_address
from .models import (IssueTicket,
IssueTicketSubscription,
IssueComment,
BugTrackerUserProfile)
from .notifications import (notify_of_new_comment,
notify_of_new_issue)
class IssueTicketCreationForm(forms.Form):
"""
``IssueTicket`` creation form for registered users only.
"""
title = forms.CharField(widget=forms.TextInput(),
max_length=255,
label=_('Title'))
description = MarkupCharField(label=_('Problem description'))
notify_of_reply = forms.BooleanField(widget=forms.CheckboxInput(),
label=_('Notify me of new reply'),
required=False)
def save(self, request, submitter):
"""
Save the form by creating a new ``IssueTicket``.
:param request: The current request.
:param submitter: The ticket's submitter.
:return The newly created ticket.
"""
new_obj = IssueTicket.objects.create(title=self.cleaned_data['title'],
description=self.cleaned_data['description'],
submitter=submitter,
submitter_ip_address=get_client_ip_address(request))
# Add subscriber if necessary
if self.cleaned_data['notify_of_reply']:
IssueTicketSubscription.objects.subscribe_to_issue(submitter, new_obj)
# Notify subscribers
notify_of_new_issue(new_obj, request, submitter)
# Return the newly created object
return new_obj
class IssueTicketEditionForm(forms.ModelForm):
"""
``IssueTicket`` edition form for registered users only.
"""
class Meta:
model = IssueTicket
fields = ('title',
'description')
class IssueCommentCreationForm(forms.Form):
"""
``IssueComment`` creation form for registered users only.
"""
comment_body = MarkupCharField(label=_('Comment text'))
notify_of_reply = forms.BooleanField(widget=forms.CheckboxInput(),
label=_('Notify me of new reply'),
required=False)
def save(self, request, issue, author):
"""
Save the form by creating a new ``IssueComment`` for the given ``IssueTicket``.
Drop a success flash message after saving.
:param request: The current request.
:param issue: The related issue instance.
:param author: The author of this comment.
"""
new_obj = IssueComment.objects.create(issue=issue,
author=author,
body=self.cleaned_data['comment_body'],
author_ip_address=get_client_ip_address(request))
# Add subscriber if necessary
if self.cleaned_data['notify_of_reply']:
            IssueTicketSubscription.objects.subscribe_to_issue(author, new_obj.issue)
else:
IssueTicketSubscription.objects.unsubscribe_from_issue(author, new_obj.issue)
# Notify subscribers
notify_of_new_comment(issue, new_obj, request, author)
# Return the newly created object
return new_obj
class IssueCommentReportCreationForm(ContentReportCreationForm):
"""
``IssueCommentReport`` creation form for registered users only.
"""
def get_extra_notification_kwargs(self):
"""
Return extra arguments for the notification template.
"""
return {
'content_object_name': 'comment',
'title_template_name': "bugtracker/issue_comment_report_subject.txt",
'message_template_name': "bugtracker/issue_comment_report_body.txt",
'message_template_name_html': "bugtracker/issue_comment_report_body.html",
}
class BugTrackerProfileModificationForm(forms.ModelForm):
"""
Bug tracker user's account modification form.
"""
class Meta:
model = BugTrackerUserProfile
fields = ('notify_of_new_issue',
'notify_of_reply_by_default')
|
DougFirErickson/neon | examples/fast_rcnn_alexnet.py | Python | apache-2.0 | 7,314 | 0.002461 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Trains a Fast-RCNN model on PASCAL VOC dataset.
This Fast-RCNN is based on Alexnet that was pre-trained in ImageI1K using neon.
Reference:
"Fast R-CNN"
http://arxiv.org/pdf/1504.08083v2.pdf
https://github.com/rbgirshick/fast-rcnn
Usage:
python examples/fast_rcnn_alexnet.py -e 100 --save_path frcn_alexnet.pickle
Notes:
1. Neon currently has to process images with batch size being multiple of 32,
and this model uses different learning rate, the training will converge to
the level of caffe model around 100 epochs.
The original caffe model goes through 40000 iteration (mb) of training,
with 2 images per minibatch.
2. The caffe model we used for comparison is based on a pre-trained Alexnet
we trained in Neon and converted into Caffe format. The resulted training
error running in Caffe is close to the one from running the published
Fast-RCNN model using CaffeNet.
Neon support of Fast-RCNN based on VGG16 is coming soon.
3. This example demonstrates the Fast-RCNN training process. Neon support of
the inference pipeline is coming soon.
"""
import os
from neon.backends import gen_backend
from neon.data import PASCALVOC
from neon.data.datasets import Dataset
from neon.initializers import Gaussian, Constant
from neon.transforms import (Rectlin, Softmax, Identity, CrossEntropyMulti,
SmoothL1Loss, ObjectDetection)
from neon.models import Model
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.layers import (Conv, Pooling, Affine, Dropout, RoiPooling,
BranchNode, Multicost, GeneralizedCost,
GeneralizedCostMask, Tree)
from neon.callbacks.callbacks import Callbacks
from neon.util.persist import load_obj
# functions
def load_imagenet_weights(model, path):
    # load a pre-trained Alexnet from the Neon model zoo to the local path
url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/alexnet/'
filename = 'alexnet.p'
size = 488808400
workdir, filepath = Dataset._valid_path_append(path, '', filename)
if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)
print 'De-serializing the pre-trained Alexnet using ImageNet I1K ...'
pdict = load_obj(filepath)
param_layers = [l for l in model.layers.layers[0].layers[0].layers]
param_dict_list = pdict['model']['config']['layers']
for layer, ps in zip(param_layers, param_dict_list):
print layer.name, ps['config']['name']
layer.load_weights(ps, load_states=True)
if ps['config']['name'] == 'Pooling_2':
print 'Only load the pre-trained weights up to conv5 layer of Alexnet'
break
# main script
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args(gen_be=False)
# Override save path if None
if args.save_path is None:
args.save_path = 'frcn_alexnet.pickle'
if args.callback_args['save_path'] is None:
args.callback_args['save_path'] = args.save_path
if args.callback_args['serialize'] is None:
args.callback_args['serialize'] = min(args.epochs, 10)
num_epochs = args.epochs
# hyperparameters
args.batch_size = 32
n_mb = None
img_per_batch = args.batch_size
rois_per_img = 64
frcn_fine_tune = False
learning_rate_scale = 1.0/10
if frcn_fine_tune is True:
learning_rate_scale = 1.0/16
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
# setup training dataset
train_set = PASCALVOC('trainval', '2007', path=args.data_dir, output_type=0,
n_mb=n_mb, img_per_batch=img_per_batch, rois_per_img=rois_per_img)
# setup layers
b1 = BranchNode(name="b1")
imagenet_layers = [
Conv((11, 11, 64), init=Gaussian(scale=0.01), bias=Constant(0), activation=Rectlin(),
padding=3, strides=4),
Pooling(3, strides=2),
Conv((5, 5, 192), init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin(),
padding=2),
Pooling(3, strides=2),
Conv((3, 3, 384), init=Gaussian(scale=0.03), bias=Constant(0), activation=Rectlin(),
padding=1),
Conv((3, 3, 256), init=Gaussian(scale=0.03), bias=Constant(1), activation=Rectlin(),
padding=1),
Conv((3, 3, 256), init=Gaussian(scale=0.03), bias=Constant(1), activation=Rectlin(),
padding=1),
# The following layers are used in Alexnet, but not being used for Fast-RCNN
# Pooling(3, strides=2),
# Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin()),
# Dropout(keep=0.5),
# Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin()),
# Dropout(keep=0.5),
# Affine(nout=1000, init=Gaussian(scale=0.01), bias=Constant(-7), activation=Softmax())
]
class_score = Affine(
nout=21, init=Gaussian(scale=0.01), bias=Constant(0), activation=Softmax())
bbox_pred = Affine(
nout=84, init=Gaussian(scale=0.001), bias=Constant(0), activation=Identity())
frcn_layers = [
RoiPooling(layers=imagenet_layers, HW=(6, 6), bprop_enabled=frcn_fine_tune),
Affine(nout=4096, init=Gaussian(scale=0.005),
bias=Constant(.1), activation=Rectlin()),
Dropout(keep=0.5),
Affine(nout=4096, init=Gaussian(scale=0.005),
bias=Constant(.1), activation=Rectlin()),
Dropout(keep=0.5),
b1,
class_score
]
bb_layers = [
b1,
bbox_pred,
]
# setup optimizer
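# bias parameters train with twice the weight learning rate and no weight decay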
opt_w = GradientDescentMomentum(0.001 * learning_rate_scale, 0.9, wdecay=0.0005)
opt_b = GradientDescentMomentum(0.002 * learning_rate_scale, 0.9)
optimizer = MultiOptimizer({'default': opt_w, 'Bias': opt_b})
# setup model
model = Model(layers=Tree([frcn_layers, bb_layers]))
# if training a new model, seed the Alexnet conv layers with pre-trained weights
# otherwise, just load the model file
if args.model_file is None:
load_imagenet_weights(model, args.data_dir)
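# joint cost: cross-entropy over the 21 class scores plus a masked SmoothL1
# loss on the box regression targets, weighted equally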
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
GeneralizedCostMask(costfunc=SmoothL1Loss())],
weights=[1, 1])
callbacks = Callbacks(model, **args.callback_args)
model.fit(train_set, optimizer=optimizer,
num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print 'running eval on the training set...'
metric_train = model.eval(train_set, metric=ObjectDetection())
print 'Train: label accuracy - {}%, object detection SmoothL1Loss - {}'.format(
metric_train[0]*100,
metric_train[1])
|
prabeesh/Spark-Kestrel | python/pyspark/worker.py | Python | bsd-3-clause | 2,473 | 0.001617 | """
Worker that receives input from Piped RDD.
"""
import os
import sys
import time
import traceback
from base64 import standard_b64decode
# CloudPickler needs to be imported so that depicklers are registered using the
# copy_reg module.
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.cloudpickle import CloudPickler
from pyspark.files import SparkFiles
from pyspark.serializers import write_with_length, read_w | ith_length, w | rite_int, \
read_long, write_long, read_int, dump_pickle, load_pickle, read_from_pickle_file
def load_obj(infile):
return load_pickle(standard_b64decode(infile.readline().strip()))
def report_times(outfile, boot, init, finish):
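    # a timing record is flagged with -3, followed by boot/init/finish times in milliseconds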
write_int(-3, outfile)
write_long(1000 * boot, outfile)
write_long(1000 * init, outfile)
write_long(1000 * finish, outfile)
def main(infile, outfile):
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
return
spark_files_dir = load_pickle(read_with_length(infile))
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
sys.path.append(spark_files_dir)
num_broadcast_variables = read_int(infile)
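    # each broadcast variable arrives as an (id, pickled value) pair and is
    # registered so deserialized tasks can look it up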
for _ in range(num_broadcast_variables):
bid = read_long(infile)
value = read_with_length(infile)
_broadcastRegistry[bid] = Broadcast(bid, load_pickle(value))
func = load_obj(infile)
bypassSerializer = load_obj(infile)
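    # when the serializer is bypassed, results are written back raw instead of pickled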
if bypassSerializer:
dumps = lambda x: x
else:
dumps = dump_pickle
init_time = time.time()
iterator = read_from_pickle_file(infile)
try:
for obj in func(split_index, iterator):
write_with_length(dumps(obj), outfile)
except Exception as e:
write_int(-2, outfile)
write_with_length(traceback.format_exc(), outfile)
sys.exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
# Mark the beginning of the accumulators section of the output
write_int(-1, outfile)
for aid, accum in _accumulatorRegistry.items():
write_with_length(dump_pickle((aid, accum._value)), outfile)
write_int(-1, outfile)
if __name__ == '__main__':
# Redirect stdout to stderr so that users must return values from functions.
old_stdout = os.fdopen(os.dup(1), 'w')
os.dup2(2, 1)
main(sys.stdin, old_stdout)
|
c10k/docgen | cpp_doc_generator.py | Python | mit | 28,176 | 0.002271 | #! /usr/bin/env python3
import argparse
from os.path import basename, splitext, isdir, abspath, join
from os import getcwd
class comments:
def __init__(self, comment_lines):
self.comment_lines = []
temp_buffer = str()
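        # merge wrapped lines into the preceding @tag line so each entry
        # in comment_lines holds one complete tag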
for each_line in comment_lines:
each_line = each_line.strip('*/ \t\n')
if each_line:
if each_line.startswith('@'):
if temp_buffer:
self.comment_lines.append(temp_buffer)
temp_buffer = each_line
else:
temp_buffer += ' ' + each_line
if temp_buffer:
self.comment_lines.append(temp_buffer)
def get_properties(self):
'''
Process the comments and retrieve info from them.
FOR NOW ONLY FUNCTIONS, METHODS OR CTORS WILL BE PARSED.
'''
temp = ''.join(self.comment_lines).split('@')
comment_start = temp[1].split(' ')[0]
if comment_start != "method" and comment_start != "function" and comment_start != "construct":
return False
else:
properties = dict({
'desc': None,
'is_what': None,
'access': None,
'name': None,
'params': [],
'returns': [],
'throws': []
})
for each_line in temp:
if each_line:
line_tag = "Line -> " + each_line
try:
if each_line.startswith("method") or each_line.startswith("func") or each_line.startswith("construct"):
if properties['is_what'] is None:
is_what, name = each_line.split(' ')
properties['is_what'] = is_what
properties['name'] = name
else:
raise Exception(
"Invalid comment.. @func or @method or @construct tag found again in a single comment", line_tag)
| elif each_line.startswith("access"):
if properties['access'] is None:
parsed_access = each_line.split(' ')
if len(parsed_access) == 2:
properties['access'] = parsed_access[1]
else:
raise Exception(
| "Invalid comment.. access val not specified with @access tag", line_tag)
else:
raise Exception(
"Invalid comment.. @access tag found again in a single comment", line_tag)
elif each_line.startswith("desc"):
if properties['desc'] is None:
firstSpacePos = each_line.find(' ')
desc = each_line[firstSpacePos + 1:]
properties['desc'] = desc
else:
raise Exception(
"Invalid comment.. @desc tag found again in a single comment", line_tag)
elif each_line.startswith("param"):
openParenPos = each_line.find('{')
closeParenPos = each_line.find('}')
if openParenPos < 0 or closeParenPos < 0:
raise Exception(
"Invalid comment.. @param '{' or '}' missing", line_tag)
else:
parsed_param = []
type_name = each_line[openParenPos +
1:closeParenPos]
parsed_param.append(type_name)
name_and_desc = each_line[closeParenPos +
2:].split(' ', 1)
if len(name_and_desc) != 2:
raise Exception(
"Invalid comment.. @param tag takes 3 values", line_tag)
else:
parsed_param.extend(name_and_desc)
parsed_param[0], parsed_param[1] = parsed_param[1], parsed_param[0]
properties['params'].append(
parsed_param)
elif each_line.startswith("returns"):
openParenPos = each_line.find('{')
closeParenPos = each_line.find('}')
if openParenPos < 0 or closeParenPos < 0:
raise Exception(
"Invalid comment.. @returns '{' or '}' missing", line_tag)
else:
parsed_ret = []
type_name = each_line[openParenPos +
1:closeParenPos]
parsed_ret.append(type_name)
desc = each_line[closeParenPos + 2:]
parsed_ret.append(desc)
properties['returns'].append(parsed_ret)
else:
raise Exception(
"Invalid comment.. Line starting with unknown tag found", line_tag)
except Exception:
raise Exception(line_tag)
return properties
def __str__(self):
COMMENT_TAG = "=" * 10 + "COMMENT" + "=" * 10 + '\n'
return COMMENT_TAG + '\n'.join(self.comment_lines)
class code:
def __init__(self, code_lines):
self.code_lines = ''.join(code_lines)
def get_properties(self):
'''
        Process the code and retrieve info from it.
FOR NOW THIS ONLY RETURNS THE PROTOTYPE OF FUNCS, METHS, CTORS.
'''
firstParenPos = self.code_lines.find('{')
firstSemiColPos = self.code_lines.find(';')
if firstParenPos > 0 and firstSemiColPos > 0:
firstEncountered = min(firstParenPos, firstSemiColPos)
elif firstParenPos > 0 and firstSemiColPos < 0:
firstEncountered = firstParenPos
elif firstParenPos < 0 and firstSemiColPos > 0:
firstEncountered = firstSemiColPos
else:
raise Exception(
"Invalid code.. No ';' or '{' encountered while extracting function prototype, ", self.code_lines)
return self.code_lines[:firstEncountered]
def __str__(self):
CODE_TAG = "." * 10 + "CODE" + "." * 10 + '\n'
return CODE_TAG + self.code_lines
class segment:
def __new__(segment, comment_lines, code_lines):
# This __new__ method only allows object creation if comm.getproperties
# does not return False i.e. the segment object will only be constructed
# if segment is of function/method/constructor.
temp_comm = comments(comment_lines)
res = temp_comm.get_properties()
if res is not False:
return object.__new__(segment)
else:
return None
def __init__(self, comment_lines, code_lines):
self.__comm = comments(comment_lines)
self.__code = code(code_lines)
try:
self.prop = self.__comm.get_properties()
self.prop['prototype'] = self.__code.get_properties()
first_non_whitespace_pos = len(self.prop['prototype']) - len(self.prop['prototype'].lstrip())
if self.prop['prototype'].startswith("inline", first_non_whitespace_pos):
before_inline_part = self.prop['prototype'][:first_non_whitespace_pos]
after_inline_part = self.prop['prototype'][first_n |
radez/packstack | packstack/plugins/puppet_950.py | Python | apache-2.0 | 9,838 | 0.003354 | """
Installs and configures puppet
"""
import sys
import logging
import os
import platform
import time
from packstack.installer import utils
from packstack.installer import basedefs, output_messages
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.modules.common import filtered_hosts
from packstack.modules.ospluginutils import manifestfiles
from packstack.modules.puppet import scan_logfile, validate_logfile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OSPUPPET"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
PUPPETDIR = os.path.abspath(os.path.join(basedefs.DIR_PROJECT_DIR, 'puppet'))
MODULEDIR = os.path.join(PUPPETDIR, "modules")
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Puppet configuration")
paramsList = [
]
groupDict = {"GROUP_NAME" : "PUPPET",
"DESCRIPTION" : "Puppet Config parameters",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
puppetpresteps = [
{'title': 'Clean Up', 'functions':[runCleanup]},
]
controller.insertSequence("Clean Up", [], [], puppetpresteps, index=0)
puppetsteps = [
{'title': 'Installing Dependencies',
'functions': [installdeps]},
{'title': 'Copying Puppet modules and manifests',
'functions': [copyPuppetModules]},
{'title': 'Applying Puppet manifests',
'functions': [applyPuppetManifest]},
{'title': 'Finalizing',
'functions': [finalize]}
]
controller.addSequence("Puppet", [], [], puppetsteps)
def runCleanup(config):
localserver = utils.ScriptRunner()
localserver.append("rm -rf %s/*pp" % basedefs.PUPPET_MANIFEST_DIR)
localserver.execute()
def installdeps(config):
for hostname in filtered_hosts(config):
server = utils.ScriptRunner(hostname)
for package in ("puppet", "openssh-clients", "tar", "nc"):
server.append("rpm -q %s || yum install -y %s" % (package, package))
server.execute()
def copyPuppetModules(config):
os_modules = ' '.join(('apache', 'ceilometer', 'cinder', 'concat',
'create_resources', 'firewall', 'glance',
'heat', 'horizon', 'inifile', 'keystone',
'memcached', 'mongodb', 'mysql', 'neutron',
'nova', 'openstack', 'packstack', 'qpid',
'rsync', 'ssh', 'stdlib', 'swift', 'sysctl',
'tempest', 'vcsrepo', 'vlan', 'vswitch',
'xinetd'))
# write puppet manifest to disk
manifestfiles.writeManifests()
server = utils.ScriptRunner()
tar_opts = ""
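    # on Fedora the bundled create_resources module is excluded from the copy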
if platform.linux_distribution()[0] == "Fedora":
tar_opts += "--exclude create_resources "
for hostname in filtered_hosts(config):
host_dir = controller.temp_map[hostname]
server.append("cd %s/puppet" % basedefs.DIR_PROJECT_DIR)
# copy Packstack facts
server.append("tar %s --dereference -cpzf - facts | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" % (tar_opts, hostname, host_dir))
# copy Packstack manifests
server.append("cd %s" % basedefs.PUPPET_MANIFEST_DIR)
server.append("tar %s --dereference -cpzf - ../manifests | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" % (tar_opts, hostname, host_dir))
# copy resources
for path, localname in controller.resources.get(hostname, []):
server.append("scp -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null %s root@%s:%s/resources/%s" %
(path, hostname, host_dir, localname))
# copy Puppet modules required by Packstack
server.append("cd %s/puppet/modules" % basedefs.DIR_PROJECT_DIR)
server.append("tar %s --dereference -cpzf - %s | "
"ssh -o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"root@%s tar -C %s -xpzf -" %
| (tar_opts, os_modules, hostname,
os.path.join(host_dir, 'modules')))
server.execute()
def waitforpuppet(currently_running):
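    # poll each host for a "<manifest>.finished" marker; once it appears,
    # fetch the log back and validate it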
global controller
log_len = 0
twirl = ["-","\\","|","/"]
while currently_running:
for hostname, finished_logfile in currently_running:
log_file = os.path.splitext(os.path.basename(finished_logfile))[0]
space_len = basedefs.SPACE_LEN - len(log_file)
| if len(log_file) > log_len:
log_len = len(log_file)
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
twirl = twirl[-1:] + twirl[:-1]
sys.stdout.write(("\rTesting if puppet apply is finished : %s" % log_file).ljust(40 + log_len))
sys.stdout.write("[ %s ]" % twirl[0])
sys.stdout.flush()
try:
# Once a remote puppet run has finished, we retrieve the log
# file and check it for errors
local_server = utils.ScriptRunner()
log = os.path.join(basedefs.PUPPET_MANIFEST_DIR,
os.path.basename(finished_logfile).replace(".finished", ".log"))
local_server.append('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@%s:%s %s' % (hostname, finished_logfile, log))
# To not pollute logs we turn of logging of command execution
local_server.execute(log=False)
# If we got to this point the puppet apply has finished
currently_running.remove((hostname, finished_logfile))
# clean off the last "testing apply" msg
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
sys.stdout.write(("\r").ljust(45 + log_len))
except ScriptRuntimeError:
# the test raises an exception if the file doesn't exist yet
# TO-DO: We need to start testing 'e' for unexpected exceptions
time.sleep(3)
continue
# check log file for relevant notices
controller.MESSAGES.extend(scan_logfile(log))
# check the log file for errors
validate_logfile(log)
sys.stdout.write(("\r%s : " % log_file).ljust(space_len))
print ("[ " + utils.color_text(output_messages.INFO_DONE, 'green') + " ]")
def applyPuppetManifest(config):
print
currently_running = []
lastmarker = None
for manifest, marker in manifestfiles.getFiles():
# if the marker has changed then we don't want to proceed until
# all of the previous puppet runs have finished
        if lastmarker is not None and lastmarker != marker:
waitforpuppet(currently_running)
lastmarker = marker
for hostname in filtered_hosts(config):
if "%s_" % hostname not in manifest:
continue
host_dir = controller.temp_map[hostname]
print "Applying " + manifest
server = utils.ScriptRunner(hostname)
man_path = os.path.join(controller.temp_map[hostname],
basedefs.PUPPET_MANIFEST_RELATIVE,
manifest)
running_logfile = "%s.running" % man_path
finished_logfile = "%s.finished" % man_path
currently_runnin |
WisZhou/websocket_messager | utils.py | Python | mit | 904 | 0.001106 | #!/usr/bin/env python
# coding: utf-8
import logging
import config
def get_common_logger(name='common', logfile=None):
'''
args: name (str): logger name
logfile (str): log file, use stream handler (stdout) as default.
return:
logger | obj
'''
my_logger = logging.getLogger(name)
my_logger.setLevel(config.LOG_LEVEL)
if logfile:
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
handler.setFormatter(formatter)
| my_logger.addHandler(handler)
# Stop logger propagate, forbiden duplicate log.
my_logger.propagate = False
return my_logger
COMMON_LOGGER = get_common_logger('common logger')
if __name__ == '__main__':
COMMON_LOGGER.debug('test')
|
willprice/python-omxplayer-wrapper | tests/unit/test_dbus_connection.py | Python | lgpl-3.0 | 3,009 | 0.000665 | import unittest
from parameterized import parameterized
from mock import patch, Mock
from dbus import DBusException
from omxplayer.dbus_connection import DBusConnection, DBusConnectionError
@patch('dbus.bus.BusConnection')
class DBusConnectionTests(unittest.TestCase):
def setUp(self):
self.proxy = Mock()
self.bus = Mock()
self.bus.get_object = Mock(return_value=self.proxy)
@parameterized.expand([
['unix:abstract=/tmp/dbus-EXAMPLE,g | uid=EXAMPLE'],
['unix:abstract=/tmp/dbus-EXAMPLE2,guid=EXAMPLE2'],
])
def test_connects_to_omxplayer_bus(self, BusConnection, bus_address, *args):
self.create_example_dbus_connection(bus_address)
BusConnection.assert_called_once_with(bus_address)
def test_ | constructs_proxy_for_omxplayer(self, BusConnection, *args):
BusConnection.return_value = self.bus
self.create_example_dbus_connection()
self.bus.get_object.assert_called_once_with(
'org.mpris.MediaPlayer2.omxplayer',
'/org/mpris/MediaPlayer2',
introspect=False)
@parameterized.expand([
['org.mpris.MediaPlayer2'],
['org.mpris.MediaPlayer2.Player'],
['org.freedesktop.DBus.Properties']
])
def test_constructs_dbus_interfaces(self, BusConnection, interface):
with patch('dbus.Interface') as Interface:
BusConnection.return_value = self.bus
self.create_example_dbus_connection()
Interface.assert_any_call(self.proxy, interface)
def test_constructs_root_interface(self, *args):
with patch('dbus.Interface') as Interface:
mpris_interface = Mock()
Interface.return_value = mpris_interface
connection = self.create_example_dbus_connection()
self.assertEqual(mpris_interface, connection.root_interface)
def test_constructs_properties_interface(self, *args):
with patch('dbus.Interface') as Interface:
properties_interface = Mock()
Interface.return_value = properties_interface
connection = self.create_example_dbus_connection()
self.assertEqual(properties_interface,
connection.properties_interface)
def test_constructs_player_interface(self, *args):
with patch('dbus.Interface') as Interface:
player_interface = Mock()
Interface.return_value = player_interface
connection = self.create_example_dbus_connection()
self.assertEqual(player_interface, connection.player_interface)
def test_raises_error_if_cant_obtain_proxy(self, BusConnection):
self.bus.get_object = Mock(side_effect=DBusException)
BusConnection.return_value = self.bus
with self.assertRaises(DBusConnectionError):
connection = self.create_example_dbus_connection()
def create_example_dbus_connection(self, address="example_bus_address"):
return DBusConnection(address)
|
elifesciences/lax | src/publisher/api_v2_views.py | Python | gpl-3.0 | 12,634 | 0.00182 | import json
import jsonschema
from django.core import exceptions as django_errors
from . import models, logic, fragment_logic, utils
from .utils import ensure, isint, toint, lmap
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.http import HttpResponse as DjangoHttpResponse
from django.core.exceptions import ObjectDoesNotExist
from .models import XML2JSON
from et3.extract import path as p
from et3.render import render_item
import logging
from django.http.multipartparser import parse_header
LOG = logging.getLogger(__name__)
class HttpResponse(DjangoHttpResponse):
@property
def content_type(self):
return self.get("content-type", None)
def _ctype(content_type_key):
"returns a content type for the given `content_type_key`."
assert content_type_key in settings.CONTENT_TYPES
return "application/vnd.elife.article-%s+json" % content_type_key
def ctype(content_type_key, version=None):
"""returns a content type and version header for the given `content_type_key` and `version`.
if no `version` is specified then the latest version is used."""
content_type = _ctype(content_type_key)
current_version = settings.ALL_SCHEMA_IDX[content_type_key][0][0]
version = version or current_version
if version != current_version:
assert version in settings.SCHEMA_VERSIONS[content_type_key]
return "%s; version=%s" % (content_type, version)
def response(data, code=200, content_type=None, headers=None):
"""returns given `data` to the user, assuming it's been encoded properly already.
Assumes a plain text content-type by default."""
content_type = content_type or "text/plain; charset=UTF-8"
headers = headers or {}
resp_obj = HttpResponse(status=code, content_type=content_type, content=data)
for header, header_value in headers.items():
resp_obj[header] = header_value
return resp_obj
def json_response(data, code=200, content_type=None, headers=None):
"dumps given `data` to json and sets a sensible default content-type header."
content_type = content_type or "application/json"
headers = headers or {}
json_string = utils.json_dumps(data)
return response(json_string, code, content_type, headers)
def error_response(code, title, detail=None):
body = {"title": title}
if detail:
body["detail"] = detail
return json_response(body, code)
def http_406():
"returns a HTTP 406 json error response (couldn't negotiate content type with request)."
title = "not acceptable"
detail = "could not negotiate an acceptable response type"
return error_response(406, title, detail)
def http_404(detail=None):
"returns a HTTP 404 json error response (couldn't find requested resource)"
return error_response(404, "not found", detail)
def request_args(request, **overrides):
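    # start from the global API defaults, then apply per-call overrides
    # before validating the query parameters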
opts = {}
opts.update(settings.API_OPTS)
opts.update(overrides)
def ispositiveint(param):
def wrap(v):
ensure(
isint(v) and int(v) > 0,
"expecting positive integer for %r parameter" % param,
)
return int(v)
return wrap
def inrange(minpp, maxpp):
def fn(v):
ensure(
v >= minpp and v <= maxpp,
"value must be between %s and %s for 'per-page' parameter"
% (minpp, maxpp),
)
return v
return fn
def asc_or_desc(val):
v = val.strip().upper()[:4]
ensure(
v in ["ASC", "DESC"],
"expecting either 'asc' or 'desc' for 'order' parameter",
)
return v
desc = {
"page": [p("page", opts["page_num"]), ispositiveint("page")],
"per_page": [
p("per-page", opts["per_page"]),
ispositiveint("per-page"),
inrange(opts["min_per_page"], opts["max_per_page"]),
],
"order": [p("order", opts["order_direction"]), str, asc_or_desc],
}
return render_item(desc, request.GET)
def flatten_accept(accepts_header_str):
"returns a list of triples like [(mime, 'version', version), ...]"
lst = []
if not accepts_header_str:
return lst
for mime in accepts_header_str.split(","):
# ('applica | tion/vnd.elife.article-vor+json', {'version': 2})
parsed_mime, parsed_params = parse_header(mime.encode())
# ('*/*', 'version', None)
# ('application/json', 'version', None)
# ('application/vnd.elife.article-poa+json', 'version', 2)
version = parsed_params.pop("version", b"").decode("utf-8")
lst.append((parsed_mime, "version", | version or None))
return lst
def negotiate(accepts_header_str, content_type_key):
"""parses the 'accept-type' header in the request and returns a content-type header and version.
returns `None` if a content-type can't be negotiated."""
# "application/vnd.elife.article-blah+json"
response_mime = _ctype(content_type_key)
# 2
max_content_type_version = settings.ALL_SCHEMA_IDX[content_type_key][0][0]
# ("application/vnd.elife.article-blah+json", 2)
perfect_response = (response_mime, max_content_type_version)
if not accepts_header_str:
# not specified/user accepts anything
return perfect_response
general_cases = ["*/*", "application/*", "application/json"]
acceptable_mimes_list = flatten_accept(accepts_header_str)
versions = []
for acceptable_mime in acceptable_mimes_list:
if acceptable_mime[0] in general_cases:
# user accepts anything
return perfect_response
if acceptable_mime[0] == response_mime:
if not acceptable_mime[-1]:
# user accepts the unqualified content type
return perfect_response
# user is picky about the version of the content type they want.
# we need to make sure the version value isn't bogus.
version = toint(acceptable_mime[-1])
if version and version > 0 and version <= max_content_type_version:
versions.append(version)
if not versions:
# can't figure out what they want
return
return (response_mime, max(versions))
#
#
#
def is_authenticated(request):
# this header is never set, but only for this API because on /articles/42 it works
val = request.META.get(settings.KONG_AUTH_HEADER)
# LOG.info("authenticated? %s type %s" % (val, type(val)))
return val or False
@require_http_methods(["HEAD", "GET"])
def ping(request):
"returns a test response for monitoring, *never* to be cached"
return response(
"pong",
content_type="text/plain; charset=UTF-8",
headers={"Cache-Control": "must-revalidate, no-cache, no-store, private"},
)
@require_http_methods(["HEAD", "GET"])
def article_list(request):
"returns a list of snippets"
authenticated = is_authenticated(request)
try:
kwargs = request_args(request)
kwargs["only_published"] = not authenticated
total, results = logic.latest_article_version_list(**kwargs)
data = {"total": total, "items": lmap(logic.article_snippet_json, results)}
return json_response(data, content_type=ctype(settings.LIST))
except AssertionError as err:
return error_response(400, "bad request", err.message)
@require_http_methods(["HEAD", "GET"])
def article(request, msid):
"return the article-json for the most recent version of the given article ID"
authenticated = is_authenticated(request)
try:
av = logic.most_recent_article_version(msid, only_published=not authenticated)
return json_response(logic.article_json(av), content_type=ctype(av.status))
except models.Article.DoesNotExist:
return http_404()
def article_version_list__v1(request, msid):
"returns a list of versions for the given article ID"
authenticated = is_authenticated(request)
try:
data = logic.a |
cbrucks/Federated_Keystone | tests/test_backend_sql.py | Python | apache-2.0 | 11,834 | 0.000085 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone import catalog
from keystone.common import sql
from keystone import config
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import test
from keystone import token
from keystone import trust
import default_fixtures
import test_backend
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlTests(test.TestCase):
def setUp(self):
super(SqlTests, self).setUp()
self.config([test.etcdir('keystone.conf.sample'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')])
# initialize managers and override drivers
self.catalog_man = catalog.Manager()
self.identity_man = identity.Manager()
self.token_man = token.Manager()
self.trust_man = trust.Manager()
self.policy_man = policy.Manager()
# create shortcut references to each driver
self.catalog_api = self.catalog_man.driver
self.identity_api = self.identity_man.driver
self.token_api = self.token_man.driver
self.policy_api = self.policy_man.driver
self.trust_api = self.trust_man.driver
# populate the engine with tables & fixtures
self.load_fixtures(default_fixtures)
#defaulted by the data load
self.user_foo['enabled'] = True
def tearDown(self):
sql.set_global_engine(None)
super(SqlTests, self).tearDown()
class SqlIdentity(SqlTests, test_backend.IdentityTests):
def test_delete_user_with_project_association(self):
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
self.identity_man.create_user({}, user['id'], user)
self.identity_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_projects_for_user,
user['id'])
def test_create_null_user_name(self):
user = {'id': uuid.uuid4().hex,
'name': None,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
self.assertRaises(exception.ValidationError,
self.identity_man.create_user, {},
user['id'],
user)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user_by_name,
user['name'],
DEFAULT_DOMAIN_ID)
def test_create_null_project_name(self):
tenant = {'id': uuid.uuid4().hex,
'name': None,
'domain_id': DEFAULT_DOMAIN_ID}
self.assertRaises(exception.ValidationError,
self.identity_man.create_project, {},
tenant['id'],
tenant)
self.assertRaises(exception.ProjectNotFound,
self.identity_api.get_project,
tenant['id'])
self.assertRaises(exception.ProjectNotFound,
self.identity_api.get_project_by_name,
tenant['name'],
DEFAULT_DOMAIN_ID)
def test_create_null_role_name(self):
role = {'id': uuid.uuid4().hex,
'name': None}
self.assertRaises(exception.Conflict,
self.identity_api.create_role,
role['id'],
role)
self.assertRaises(exception.RoleNotFound,
self.identity_api.get_role,
role['id'])
def test_delete_project_with_user_association(self):
user = {'id': 'fake',
'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
self.identity_man.create_user({}, 'fake', user)
self.identity_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_project(self.tenant_bar['id'])
tenants = self.identity_api.get_projects_for_user(user['id'])
self.assertEquals(tenants, [])
def test_delete_user_with_metadata(self):
user = {'id': 'fake',
'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
self.identity_man.create_user({}, 'fake', user)
self.identity_api.create_metadata(user['id'],
self.tenant_bar['id'],
{'extra': 'extra'})
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.MetadataNotFound,
self.identity_api.get_metadata,
user['id'],
self.tenant_bar['id'])
def test_delete_project_with_metadata(self):
user = {'id': 'fake',
'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
self.identity_man.create_user({}, 'fake', user)
self.identity_api.create_metadata(user['id'],
self.tenant_bar['id'],
{'extra': 'extra'})
self.identity_api.delete_project(self.tenant_bar['id'])
self.assertRaises(exception.MetadataNotFound,
self.identity_api.get_metadata,
user['id'],
self.tenant_bar['id'])
def test_update_project_returns_extra(self):
"""This tests for backwards-compatibility | with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
tenant_id = uuid.uuid4().hex
arbitrary_key = uuid.u | uid4().hex
arbitrary_value = uuid.uuid4().hex
tenant = {
'id': tenant_id,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
arbitrary_key: arbitrary_value}
ref = self.identity_man.create_project({}, tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('extra'))
tenant['name'] = uuid.uuid4().hex
ref = self.identity_api.update_project(tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
def test_update_user_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
user_id = uuid.uuid4().h |
aplanas/kmanga | kmanga/kmanga/settings.py | Python | gpl-3.0 | 4,546 | 0.00088 | """
Django settings for kmanga project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '18k7lr2so(vnb*y8&yr1b0tf-bcygm7#(%%#inx8fm4pskro1$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kmanga.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kmanga.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.p | assword_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordVali | dator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# KManga specific configuration
INSTALLED_APPS += [
# External applications
'django_rq',
'easy_thumbnails',
# Project applications
'core.apps.CoreConfig',
'proxy.apps.ProxyConfig',
'registration.apps.RegistrationConfig',
'scrapyctl.apps.ScrapyCtlConfig',
]
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
THUMBNAIL_ALIASES = {
'': {
'cover': {
'size': (270, 360),
'crop': 'smart',
'upscale': True,
'bw': True,
},
},
}
RQ_QUEUES = {
'default': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0,
},
'high': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0,
},
'low': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0,
}
}
KINDLEGEN = os.path.join(BASE_DIR, '..', 'bin', 'kindlegen')
# IMAGES_STORE and ISSUES_STORE are also in `scraper` settings
IMAGES_STORE = os.path.join(BASE_DIR, '..', 'scraper', 'img_store')
ISSUES_STORE = os.path.join(BASE_DIR, '..', 'scraper', 'issue_store')
MOBI_STORE = os.path.join(BASE_DIR, '..', 'scraper', 'mobi_store')
VOLUME_MAX_SIZE = 12 * 1024**2
SCRAPY_SETTINGS_MODULE = 'scraper.settings'
SCRAPY_ACCOUNTS = {}
DEFAULT_FROM_EMAIL = 'admin@kmanga.net'
CONTACT_EMAIL = 'admin@kmanga.net'
KMANGA_EMAIL = 'kindle@kmanga.net'
# Import local settings
try:
from .local_settings import *
except ImportError:
pass
|
freedomtan/tensorflow | tensorflow/tools/ci_build/copy_binary.py | Python | apache-2.0 | 4,178 | 0.007899 | #!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Licen | se for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically copy TensorFlow binaries
#
# Usage:
# ./tensorflow/tools/ci_build/copy_binary.py --filename
# tf_nightly/tf_nightly_gpu-1.4.0.dev20170914-cp35-cp35m-manylinux1_x86_64.whl
# --new_py_ver 36
#
"""Copy binaries of TensorFl | ow for different python versions."""
# pylint: disable=superfluous-parens
import argparse
import os
import re
import shutil
import tempfile
import zipfile
TF_NIGHTLY_REGEX = (r"(.+)(tf_nightly.*)-(\d\.[\d]{1,2}"
r"\.\d.dev[\d]{0,8})-(.+)\.whl")
BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"
def check_existence(filename):
"""Check the existence of file or dir."""
if not os.path.exists(filename):
raise RuntimeError("%s not found." % filename)
def copy_binary(directory, origin_tag, new_tag, version, package):
"""Rename and copy binaries for different python versions.
Arguments:
directory: string of directory
origin_tag: str of the old python version tag
new_tag: str of the new tag
version: the version of the package
package: str, name of the package
"""
print("Rename and copy binaries with %s to %s." % (origin_tag, new_tag))
origin_binary = BINARY_STRING_TEMPLATE % (package, version, origin_tag)
new_binary = BINARY_STRING_TEMPLATE % (package, version, new_tag)
zip_ref = zipfile.ZipFile(os.path.join(directory, origin_binary), "r")
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
zip_ref.extractall()
zip_ref.close()
old_py_ver = re.search(r"(cp\d\d-cp\d\d)", origin_tag).group(1)
new_py_ver = re.search(r"(cp\d\d-cp\d\d)", new_tag).group(1)
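  # rewrite the WHEEL metadata file so its python tag matches the target version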
wheel_file = os.path.join(
tmpdir, "%s-%s.dist-info" % (package, version), "WHEEL")
with open(wheel_file, "r") as f:
content = f.read()
with open(wheel_file, "w") as f:
f.write(content.replace(old_py_ver, new_py_ver))
zout = zipfile.ZipFile(directory + new_binary, "w", zipfile.ZIP_DEFLATED)
zip_these_files = [
"%s-%s.dist-info" % (package, version),
"%s-%s.data" % (package, version),
"tensorflow",
"tensorflow_core",
]
for dirname in zip_these_files:
for root, _, files in os.walk(dirname):
for filename in files:
zout.write(os.path.join(root, filename))
zout.close()
finally:
shutil.rmtree(tmpdir)
def main():
"""This script copies binaries.
Requirements:
filename: The path to the whl file
AND
new_py_ver: Create a nightly tag with current date
Raises:
RuntimeError: If the whl file was not found
"""
parser = argparse.ArgumentParser(description="Cherry picking automation.")
# Arg information
parser.add_argument(
"--filename", help="path to whl file we are copying", required=True)
parser.add_argument(
"--new_py_ver", help="two digit py version eg. 27 or 33", required=True)
args = parser.parse_args()
# Argument checking
args.filename = os.path.abspath(args.filename)
check_existence(args.filename)
regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)
directory = regex_groups.group(1)
package = regex_groups.group(2)
version = regex_groups.group(3)
origin_tag = regex_groups.group(4)
old_py_ver = re.search(r"(cp\d\d)", origin_tag).group(1)
# Create new tags
new_tag = origin_tag.replace(old_py_ver, "cp" + args.new_py_ver)
# Copy the binary with the info we have
copy_binary(directory, origin_tag, new_tag, version, package)
if __name__ == "__main__":
main()
|
dlux/sayHi_tempest_plugin | demo_tempest_plugin/config.py | Python | gpl-3.0 | 595 | 0 | '''
Tempest plugin configuration for say_hi packages
@author: luzC
'''
from oslo_config import cfg
from tempest import config
|
service_available_group = cfg.OptGroup(
name="service_available",
title="Available OpenStack Services"
)
ServiceAvailableGroup = [
cfg.BoolOpt("dluxSay", default=True,
help="Whether or not dluxsay is expected to be available")
]
say_hi_group = cfg. | OptGroup(
name="say_hi",
title="Say hi test variables"
)
SayHiGroup = [
cfg.StrOpt("my_custom_variable", default="custom value",
help="My custom variable.")
]
|
saeranv/UWG_Python | resources/quickstart.py | Python | gpl-3.0 | 587 | 0.003407 | import UWG
import os
# Gets path of current directory
CURR_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
# To run UWG provide the following inputs
epw_directory = os.path.join(CURR_DIRECTORY,"epw") # EPW file directory
epw_filename = "SGP_Singapore.486980_IWEC.epw" # EPW file name
uwg_param_directory = CURR_DIRECTORY # .uwg file directory
uwg_param_filename = "initialize.uwg" # .uwg fil | e name
# Initialize the UWG object
uwg = UWG.UWG(epw_directory, epw_filename, u | wg_param_directory, uwg_param_filename)
# Run the simulation
uwg.run()
|
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/dashboards/admin/aggregates/forms.py | Python | gpl-2.0 | 2,999 | 0.000333 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.aggregates import constants
from openstack_dashboard.openstack.common.log import operate_log
INDEX_URL = constants.AGGREGATES_INDEX_URL
class UpdateAggregateForm(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
max_length=255)
availability_zone = forms.CharField(label=_("Availability Zone"),
max_length=255)
def __init__(self, request, *args, **kwargs):
super(UpdateAggregateForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
id = self.initial['id']
name = data['name']
availability_zone = data['availability_zone']
aggregate = {'name': name}
if availability_zone:
aggregate['availability_zone'] = availability_zone
try:
api.nova.aggregate_update(request, id, aggregate)
operate_log(request.user.username,
request.user.roles,
da | ta["name"] + "aggregate update")
message = _('Successfully updated aggregate: "%s."') \
% data['name']
messages.success(request, message)
except Exception:
exceptions.handle(request,
_('Unable to update the aggregate.'))
return True
class UpdateMetadataForm(forms.SelfHandlingForm):
def handle(self, request, data):
id = self.initial['id']
old_metadata = self.initial['metadata']
|
try:
new_metadata = json.loads(self.data['metadata'])
metadata = dict(
(item['key'], str(item['value']))
for item in new_metadata
)
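            # keys dropped from the new metadata are set to None so the API call removes them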
for key in old_metadata:
if key not in metadata:
metadata[key] = None
api.nova.aggregate_set_metadata(request, id, metadata)
message = _('Metadata successfully updated.')
messages.success(request, message)
except Exception:
msg = _('Unable to update the aggregate metadata.')
exceptions.handle(request, msg)
return False
return True
|
janusnic/21v-python | unit_14/car8.py | Python | mit | 319 | 0.018809 | #!/usr/bin/python
# -*- coding: utf-8 | -*-
import sqlite3 as lite
import sys
uId = 4
con = lite.connect('test.db')
with con:
cur = con.cursor()
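    # :Id is a named placeholder; sqlite3 binds the value from the mapping
    # instead of building the SQL string by hand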
cur.execute("SELECT Name, Price FROM Cars WHERE Id=:Id",
{"Id": uId}) |
con.commit()
row = cur.fetchone()
print row[0], row[1] |