| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
import sys
from PyFBA import lp, log_and_message
def reaction_bounds(reactions, reactions_with_upsr, media, lower=-1000.0, mid=0.0, upper=1000.0, verbose=False):
"""
Set the bounds for each reaction. We set the reactions to run between
either lower/mid, mid/upper, or lower/upper depending on whether the
reaction runs <=, =>, or <=> respectively.
:param reactions: The dict of all reactions we know about
:type reactions: dict of metabolism.Reaction
    :param reactions_with_upsr: The set of reactions to run, including any uptake/secretion reactions
    :type reactions_with_upsr: set
:param media: The media compounds
:type media: set
:param lower: The default lower bound
:type lower: float
:param mid: The default mid value (typically 0)
:type mid: float
:param upper: The default upper bound
    :type upper: float
    :param verbose: Print more detail about what is happening
    :type verbose: bool
:return: A dict of the reaction ID and the tuple of bounds
:rtype: dict
"""
rbvals = {}
media_uptake_secretion_count = 0
other_uptake_secretion_count = 0
for r in reactions_with_upsr:
if r == 'BIOMASS_EQN':
rbvals[r] = (mid, upper)
continue
        # if we already know the bounds, e.g. from an SBML file or from our uptake/secretion reactions
        if reactions[r].lower_bound is not None and reactions[r].upper_bound is not None:
rbvals[r] = (reactions[r].lower_bound, reactions[r].upper_bound)
continue
if r in reactions:
direction = reactions[r].direction
else:
sys.stderr.write("Did not find {} in reactions\n".format(r))
direction = "="
"""
RAE 16/6/21
        We no longer use this block to check for media components. Instead, we use the uptake_and_secretion_reactions
in external_reactions.py to do so.
We assume that if you provide uptake_and_secretion_reactions you have already culled them for the media, though
perhaps we should add a test for that.
"""
if False and (reactions[r].is_uptake_secretion or reactions[r].is_transport or reactions[r].is_input_reaction()):
in_media = False
override = False # if we have external compounds that are not in the media, we don't want to run this as a media reaction
for c in reactions[r].left_compounds:
if c.location == 'e':
if c in media:
in_media = True
else:
override = True
# in this case, we have some external compounds that we should not import.
# for example, H+ is used to translocate things
if override:
in_media = False
if in_media:
# This is what I think it should be:
rbvals[r] = (lower, upper)
#rbvals[r] = (0.0, upper)
media_uptake_secretion_count += 1
else:
rbvals[r] = (0.0, upper)
#rbvals[r] = (lower, upper)
other_uptake_secretion_count += 1
continue
if direction == "=":
# This is what I think it should be:
rbvals[r] = (lower, upper)
# rbvals[r] = (mid, upper)
elif direction == ">":
# This is what I think it should be:
rbvals[r] = (mid, upper)
# rbvals[r] = (lower, upper)
elif direction == "<":
# This is what I think it should be:
# rbvals[r] = (lower, mid)
rbvals[r] = (lower, upper)
else:
sys.stderr.write("DO NOT UNDERSTAND DIRECTION " + direction + " for " + r + "\n")
rbvals[r] = (mid, upper)
if verbose:
sys.stderr.write("In parsing the bounds we found {} media uptake ".format(media_uptake_secretion_count) +
"and secretion reactions and {} other u/s reactions\n".format(other_uptake_secretion_count))
rbounds = [rbvals[r] for r in reactions_with_upsr]
for r in reactions_with_upsr:
if r in reactions:
reactions[r].lower_bound, reactions[r].upper_bound = rbvals[r]
lp.col_bounds(rbounds)
return rbvals
def compound_bounds(cp, lower=0, upper=0):
"""
    Impose constraints on the compounds. These constraints limit how much
    each compound can vary; the bounds are essentially 0 for most compounds,
    except those that are in the media or otherwise external.
    This is the zero flux vector.
Parameters:
cp: the list of compound ids
lower: the default lower value
upper: the default upper value
"""
cbounds = [(lower, upper) for c in cp]
cbvals = {c: (lower, upper) for c in cp}
lp.row_bounds(cbounds)
return cbvals
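# A minimal usage sketch (names are illustrative; the lp module must already
# hold the stoichiometric matrix, since these functions only push the column
# and row bounds into it via lp.col_bounds/lp.row_bounds):
#
#   rb = reaction_bounds(reactions, reactions_with_upsr, media, verbose=True)
#   cb = compound_bounds(list_of_compound_ids)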
| linsalrob/PyFBA | PyFBA/fba/bounds.py | Python | mit | 4,803 |
from .create2 import *
from .factory import *
__all__ = ["FactoryCreate", "FactorySimulation"]
| USC-ACTLab/pyCreate2 | pyCreate2/__init__.py | Python | mit | 96 |
# -*- coding: utf-8 -*-
"""This file should be used to run the flask app with something like Gunicorn.
For example: gunicorn -b 0.0.0.0:8000 ocspdash.web.wsgi:app
This file should NOT be imported anywhere, though, since it would instantiate the app.
"""
from ocspdash.web import create_application
app = create_application()
| scolby33/OCSPdash | src/ocspdash/web/wsgi.py | Python | mit | 330 |
"""
batch.py
Batch simulation for M1 model using NetPyNE
"""
from netpyne.batch import Batch
import numpy as np
def runBatch(b, label, setup='mpi_bulletin'):
b.batchLabel = label
b.saveFolder = 'data/'+b.batchLabel
b.method = 'grid'
if setup == 'mpi_bulletin':
b.runCfg = {'type': 'mpi_bulletin',
'script': 'init.py',
'skip': True}
elif setup == 'hpc_slurm_comet':
b.runCfg = {'type': 'hpc_slurm',
'allocation': 'csd403',
'walltime': '6:00:00',
'nodes': 1,
'coresPerNode': 24,
'email': 'salvadordura@gmail.com',
'folder': '/home/salvadord/netpyne/examples/batchCell', # startup folder
'script': 'init.py',
'mpiCommand': 'ibrun'} # specific command for Comet
b.run() # run batch
def runBatchComet(b, label):
b.batchLabel = label
b.saveFolder = 'data/'+b.batchLabel
b.method = 'grid'
b.runCfg = {'type': 'mpi_bulletin',
'script': 'init.py',
'skip': True}
b.run() # run batch
def batchNa():
params = {'dendNa': [0.025, 0.03, 0.035, 0.4],
('IClamp1', 'amp'): list(np.arange(-2.0, 8.0, 0.5)/10.0)}
initCfg = {'duration': 1.1, 'tau1NMDA': 15}
b = Batch(params=params, initCfg=initCfg)
runBatch(b, 'batchNa', setup='mpi_bulletin')
def batchNMDA():
params = {'tau1NMDA': [10, 15, 20, 25],
('NetStim1', 'weight'): list(np.arange(1.0, 10.0, 1.0)/1e4)}
initCfg = {'duration': 1.1}
b = Batch(params=params, initCfg=initCfg)
runBatch(b, 'batchNMDA', setup='mpi_bulletin')
def batchNMDAMapping():
params = {'tau1NMDA': [10, 15, 20, 25]}
initCfg = {'duration': 1100}
from cfg import cfg
from netParams import netParams
b = Batch(params=params, initCfg=initCfg, cfg=cfg, netParams=netParams)
runBatch(b, 'batchNMDAMapping', setup='mpi_bulletin')
# Main code
if __name__ == '__main__':
# batchNa()
batchNMDAMapping()
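# A sketch of adding another sweep, following the same pattern as the batch
# functions above (the parameter name 'dendK' is hypothetical, not taken from
# this model):
#
#   def batchDendK():
#       params = {'dendK': [0.01, 0.02, 0.03]}
#       b = Batch(params=params, initCfg={'duration': 1.1})
#       runBatch(b, 'batchDendK', setup='mpi_bulletin')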
| Neurosim-lab/netpyne | examples/batchCellMapping/batch.py | Python | mit | 1,998 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100)),
('slug', models.SlugField(unique=True, blank=True)),
('site', models.URLField(null=True, blank=True)),
('rate', models.IntegerField(default=50)),
('bank', models.CharField(default=b'anz', max_length=100)),
('bank_account_name', models.CharField(max_length=100)),
('bank_account_no', models.CharField(max_length=30)),
],
options={
},
bases=(models.Model,),
),
]
| guoqiao/django-nzpower | nzpower/migrations/0001_initial.py | Python | mit | 981 |
from diary import DiaryDB, Event
import unittest
import sqlite3
import os.path
class TestDiaryDB(unittest.TestCase):
TEMP_DB_PATH = os.path.join(os.path.dirname(__file__),
'testing_dir', 'temp.db')
SIMPLE_EVENT = Event("INFO", "LEVEL")
def setUp(self):
self.logdb = DiaryDB(self.TEMP_DB_PATH)
self.logdb_default = DiaryDB()
@classmethod
def tearDownClass(cls):
        os.remove(cls.TEMP_DB_PATH)
    def test_constructs_correctly(self):
self.assertIsInstance(self.logdb.conn, sqlite3.Connection)
self.assertIsInstance(self.logdb.cursor, sqlite3.Cursor)
def test_creates_table(self):
table = self.logdb.cursor.execute('''SELECT name FROM sqlite_master
WHERE type="table" AND name="logs"
''').fetchone()[0]
        self.assertEqual(table, 'logs')
def test_creates_table_already_exists(self):
self.logdb.create_tables()
tables = self.logdb.cursor.execute('''SELECT name FROM sqlite_master
WHERE type="table" AND name="logs"
''').fetchall()
        self.assertEqual(len(tables), 1)
def test_log(self):
self.logdb.log(self.SIMPLE_EVENT)
entry = self.logdb.cursor.execute('''SELECT * FROM logs ORDER BY
inputDT ASC LIMIT 1''').fetchone()
        self.assertEqual(entry[0], self.SIMPLE_EVENT.dt)
        self.assertEqual(entry[1], self.SIMPLE_EVENT.level)
        self.assertEqual(entry[2], self.SIMPLE_EVENT.info)
def test_close(self):
self.logdb.close()
with self.assertRaises(sqlite3.ProgrammingError,
msg="Cannot operate on a closed database."):
self.logdb.conn.execute("SELECT 1 FROM logs LIMIT 1")
def test_default_path(self):
self.logdb_default.log(self.SIMPLE_EVENT)
entry = self.logdb_default.cursor.execute('''SELECT * FROM logs ORDER BY
inputDT DESC LIMIT 1''').fetchone()
        self.assertEqual(entry[0], self.SIMPLE_EVENT.dt)
        self.assertEqual(entry[1], self.SIMPLE_EVENT.level)
        self.assertEqual(entry[2], self.SIMPLE_EVENT.info)
self.logdb_default.close()
if __name__ == '__main__':
unittest.main()
| GreenVars/diary | tests/logdb_test.py | Python | mit | 2,455 |
import os
import textwrap
import glob
from os.path import join, curdir, pardir
import pytest
from pip.utils import appdirs, rmtree
from tests.lib import (pyversion, pyversion_tuple,
_create_test_package, _create_svn_repo, path_to_url)
from tests.lib.local_repos import local_checkout
from tests.lib.path import Path
def test_without_setuptools(script, data):
script.run("pip", "uninstall", "setuptools", "-y")
result = script.run(
"python", "-c",
"import pip; pip.main(["
"'install', "
"'INITools==0.2', "
"'-f', '%s', "
"'--no-use-wheel'])" % data.packages,
expect_error=True,
)
assert (
"setuptools must be installed to install from a source distribution"
in result.stderr
)
def test_pip_second_command_line_interface_works(script, data):
"""
    Check that the ``pip<PYVERSION>`` command behaves the same as ``pip``
"""
# On old versions of Python, urllib3/requests will raise a warning about
# the lack of an SSLContext.
kwargs = {}
if pyversion_tuple < (2, 7, 9):
kwargs['expect_stderr'] = True
args = ['pip%s' % pyversion]
args.extend(['install', 'INITools==0.2'])
args.extend(['-f', data.packages])
result = script.run(*args, **kwargs)
egg_info_folder = (
script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'initools'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_from_pypi(script):
"""
Test installing a package from PyPI.
"""
result = script.pip('install', '-vvv', 'INITools==0.2')
egg_info_folder = (
script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'initools'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
def test_editable_install(script):
"""
Test editable installation.
"""
result = script.pip('install', '-e', 'INITools==0.2', expect_error=True)
assert (
"INITools==0.2 should either be a path to a local project or a VCS url"
in result.stderr
)
assert not result.files_created
assert not result.files_updated
def test_install_editable_from_svn(script):
"""
Test checking out from svn.
"""
checkout_path = _create_test_package(script)
repo_url = _create_svn_repo(script, checkout_path)
result = script.pip(
'install',
'-e', 'svn+' + repo_url + '#egg=version-pkg'
)
result.assert_installed('version-pkg', with_files=['.svn'])
@pytest.mark.network
def test_download_editable_to_custom_path(script, tmpdir):
"""
Test downloading an editable using a relative custom src folder.
"""
script.scratch_path.join("customdl").mkdir()
result = script.pip(
'install',
'-e',
'%s#egg=initools-dev' %
local_checkout(
'svn+http://svn.colorstudy.com/INITools/trunk',
tmpdir.join("cache")
),
'--src',
'customsrc',
'--download',
'customdl',
)
customsrc = Path('scratch') / 'customsrc' / 'initools'
assert customsrc in result.files_created, (
sorted(result.files_created.keys())
)
assert customsrc / 'setup.py' in result.files_created, (
sorted(result.files_created.keys())
)
customdl = Path('scratch') / 'customdl' / 'initools'
customdl_files_created = [
filename for filename in result.files_created
if filename.startswith(customdl)
]
assert customdl_files_created
@pytest.mark.network
def test_install_dev_version_from_pypi(script):
"""
Test using package==dev.
"""
result = script.pip(
'install', 'INITools===dev',
'--allow-external', 'INITools',
'--allow-unverified', 'INITools',
expect_error=True,
)
assert (script.site_packages / 'initools') in result.files_created, (
str(result.stdout)
)
def _test_install_editable_from_git(script, tmpdir, wheel):
"""Test cloning from Git."""
if wheel:
script.pip('install', 'wheel')
pkg_path = _create_test_package(script, name='testpackage', vcs='git')
args = ['install', '-e', 'git+%s#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.git'])
def test_install_editable_from_git(script, tmpdir):
_test_install_editable_from_git(script, tmpdir, False)
def test_install_editable_from_git_autobuild_wheel(script, tmpdir):
_test_install_editable_from_git(script, tmpdir, True)
def test_install_editable_from_hg(script, tmpdir):
"""Test cloning from Mercurial."""
pkg_path = _create_test_package(script, name='testpackage', vcs='hg')
args = ['install', '-e', 'hg+%s#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.hg'])
def test_vcs_url_final_slash_normalization(script, tmpdir):
"""
Test that presence or absence of final slash in VCS URL is normalized.
"""
pkg_path = _create_test_package(script, name='testpackage', vcs='hg')
args = ['install', '-e', 'hg+%s/#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.hg'])
def test_install_editable_from_bazaar(script, tmpdir):
"""Test checking out from Bazaar."""
pkg_path = _create_test_package(script, name='testpackage', vcs='bazaar')
args = ['install', '-e', 'bzr+%s/#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.bzr'])
@pytest.mark.network
def test_vcs_url_urlquote_normalization(script, tmpdir):
"""
Test that urlquoted characters are normalized for repo URL comparison.
"""
script.pip(
'install', '-e',
'%s/#egg=django-wikiapp' %
local_checkout(
'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp'
'/release-0.1',
tmpdir.join("cache"),
),
)
def test_install_from_local_directory(script, data):
"""
Test installing from a local directory.
"""
to_install = data.packages.join("FSPkg")
result = script.pip('install', to_install, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_from_local_directory_with_symlinks_to_directories(
script, data):
"""
Test installing from a local directory containing symlinks to directories.
"""
to_install = data.packages.join("symlinks")
result = script.pip('install', to_install, expect_error=False)
pkg_folder = script.site_packages / 'symlinks'
egg_info_folder = (
script.site_packages / 'symlinks-0.1.dev0-py%s.egg-info' % pyversion
)
assert pkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_from_local_directory_with_no_setup_py(script, data):
"""
Test installing from a local directory with no 'setup.py'.
"""
result = script.pip('install', data.root, expect_error=True)
assert not result.files_created
assert "is not installable. File 'setup.py' not found." in result.stderr
def test_editable_install_from_local_directory_with_no_setup_py(script, data):
"""
Test installing from a local directory with no 'setup.py'.
"""
result = script.pip('install', '-e', data.root, expect_error=True)
assert not result.files_created
assert "is not installable. File 'setup.py' not found." in result.stderr
def test_install_as_egg(script, data):
"""
Test installing as egg, instead of flat install.
"""
to_install = data.packages.join("FSPkg")
result = script.pip('install', to_install, '--egg', expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_folder = script.site_packages / 'FSPkg-0.1.dev0-py%s.egg' % pyversion
assert fspkg_folder not in result.files_created, str(result.stdout)
assert egg_folder in result.files_created, str(result)
assert join(egg_folder, 'fspkg') in result.files_created, str(result)
def test_install_curdir(script, data):
"""
Test installing current directory ('.').
"""
run_from = data.packages.join("FSPkg")
# Python 2.4 Windows balks if this exists already
egg_info = join(run_from, "FSPkg.egg-info")
if os.path.isdir(egg_info):
rmtree(egg_info)
result = script.pip('install', curdir, cwd=run_from, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_pardir(script, data):
"""
Test installing parent directory ('..').
"""
run_from = data.packages.join("FSPkg", "fspkg")
result = script.pip('install', pardir, cwd=run_from, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_global_option(script):
"""
Test using global distutils options.
(In particular those that disable the actual install action)
"""
result = script.pip(
'install', '--global-option=--version', "INITools==0.1",
)
assert '0.1\n' in result.stdout
def test_install_with_pax_header(script, data):
"""
    Test installing from a tarball with a pax header on Python < 2.6
"""
script.pip('install', 'paxpkg.tar.bz2', cwd=data.packages)
def test_install_with_hacked_egg_info(script, data):
"""
    Test installing a package which defines its own egg_info class
"""
run_from = data.packages.join("HackedEggInfo")
result = script.pip('install', '.', cwd=run_from)
assert 'Successfully installed hackedegginfo-0.0.0\n' in result.stdout
@pytest.mark.network
def test_install_using_install_option_and_editable(script, tmpdir):
"""
Test installing a tool using -e and --install-option
"""
folder = 'script_folder'
script.scratch_path.join(folder).mkdir()
url = 'git+git://github.com/pypa/pip-test-package'
result = script.pip(
'install', '-e', '%s#egg=pip-test-package' %
local_checkout(url, tmpdir.join("cache")),
'--install-option=--script-dir=%s' % folder
)
script_file = (
script.venv / 'src' / 'pip-test-package' /
folder / 'pip-test-package' + script.exe
)
assert script_file in result.files_created
@pytest.mark.network
def test_install_global_option_using_editable(script, tmpdir):
"""
Test using global distutils options, but in an editable installation
"""
url = 'hg+http://bitbucket.org/runeh/anyjson'
result = script.pip(
'install', '--global-option=--version', '-e',
'%s@0.2.5#egg=anyjson' % local_checkout(url, tmpdir.join("cache"))
)
assert 'Successfully installed anyjson' in result.stdout
@pytest.mark.network
def test_install_package_with_same_name_in_curdir(script):
"""
Test installing a package with the same name of a local folder
"""
script.scratch_path.join("mock==0.6").mkdir()
result = script.pip('install', 'mock==0.6')
egg_folder = script.site_packages / 'mock-0.6.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
mock100_setup_py = textwrap.dedent('''\
from setuptools import setup
setup(name='mock',
version='100.1')''')
def test_install_folder_using_dot_slash(script):
"""
Test installing a folder using pip install ./foldername
"""
script.scratch_path.join("mock").mkdir()
pkg_path = script.scratch_path / 'mock'
pkg_path.join("setup.py").write(mock100_setup_py)
result = script.pip('install', './mock')
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_install_folder_using_slash_in_the_end(script):
r"""
Test installing a folder using pip install foldername/ or foldername\
"""
script.scratch_path.join("mock").mkdir()
pkg_path = script.scratch_path / 'mock'
pkg_path.join("setup.py").write(mock100_setup_py)
result = script.pip('install', 'mock' + os.path.sep)
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_install_folder_using_relative_path(script):
"""
Test installing a folder using pip install folder1/folder2
"""
script.scratch_path.join("initools").mkdir()
script.scratch_path.join("initools", "mock").mkdir()
pkg_path = script.scratch_path / 'initools' / 'mock'
pkg_path.join("setup.py").write(mock100_setup_py)
result = script.pip('install', Path('initools') / 'mock')
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_package_which_contains_dev_in_name(script):
"""
Test installing package from pypi which contains 'dev' in name
"""
result = script.pip('install', 'django-devserver==0.0.4')
devserver_folder = script.site_packages / 'devserver'
egg_info_folder = (
script.site_packages / 'django_devserver-0.0.4-py%s.egg-info' %
pyversion
)
assert devserver_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
assert Path('scratch') / 'target' / 'simple' in result.files_created, (
str(result)
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
assert not Path('scratch') / 'target' / 'simple' in result.files_updated
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
assert Path('scratch') / 'target' / 'simple' in result.files_updated, (
str(result)
)
egg_folder = (
Path('scratch') / 'target' / 'simple-2.0-py%s.egg-info' % pyversion)
assert egg_folder in result.files_created, (
str(result)
)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
assert singlemodule_py in result.files_created, str(result)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
assert singlemodule_py in result.files_updated, str(result)
def test_install_package_with_root(script, data):
"""
Test installing a package using pip install --root
"""
root_dir = script.scratch_path / 'root'
result = script.pip(
'install', '--root', root_dir, '-f', data.find_links, '--no-index',
'simple==1.0',
)
normal_install_path = (
script.base_path / script.site_packages / 'simple-1.0-py%s.egg-info' %
pyversion
)
# use distutils to change the root exactly how the --root option does it
from distutils.util import change_root
root_path = change_root(
os.path.join(script.scratch, 'root'),
normal_install_path
)
assert root_path in result.files_created, str(result)
# skip on win/py3 for now, see issue #782
@pytest.mark.skipif("sys.platform == 'win32' and sys.version_info >= (3,)")
def test_install_package_that_emits_unicode(script, data):
"""
Install a package with a setup.py that emits UTF-8 output and then fails.
Refs https://github.com/pypa/pip/issues/326
"""
to_install = data.packages.join("BrokenEmitsUTF8")
result = script.pip(
'install', to_install, expect_error=True, expect_temp=True, quiet=True,
)
assert (
'FakeError: this package designed to fail on install' in result.stdout
)
assert 'UnicodeDecodeError' not in result.stdout
def test_install_package_with_utf8_setup(script, data):
"""Install a package with a setup.py that declares a utf-8 encoding."""
to_install = data.packages.join("SetupPyUTF8")
script.pip('install', to_install)
def test_install_package_with_latin1_setup(script, data):
"""Install a package with a setup.py that declares a latin-1 encoding."""
to_install = data.packages.join("SetupPyLatin1")
script.pip('install', to_install)
def test_url_req_case_mismatch_no_index(script, data):
"""
    Tarball URL requirements (with no egg fragment) that happen to have
    uppercase project names should be considered equal to later requirements
    that reference the project name in lowercase.
tests/data/packages contains Upper-1.0.tar.gz and Upper-2.0.tar.gz
'requiresupper' has install_requires = ['upper']
"""
Upper = os.path.join(data.find_links, 'Upper-1.0.tar.gz')
result = script.pip(
'install', '--no-index', '-f', data.find_links, Upper, 'requiresupper'
)
# only Upper-1.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
def test_url_req_case_mismatch_file_index(script, data):
"""
    Tarball URL requirements (with no egg fragment) that happen to have
    uppercase project names should be considered equal to later requirements
    that reference the project name in lowercase.
tests/data/packages3 contains Dinner-1.0.tar.gz and Dinner-2.0.tar.gz
'requiredinner' has install_requires = ['dinner']
This test is similar to test_url_req_case_mismatch_no_index; that test
tests behaviour when using "--no-index -f", while this one does the same
test when using "--index-url". Unfortunately this requires a different
set of packages as it requires a prepared index.html file and
subdirectory-per-package structure.
"""
Dinner = os.path.join(data.find_links3, 'Dinner', 'Dinner-1.0.tar.gz')
result = script.pip(
'install', '--index-url', data.find_links3, Dinner, 'requiredinner'
)
    # only Dinner-1.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
def test_url_incorrect_case_no_index(script, data):
"""
Same as test_url_req_case_mismatch_no_index, except testing for the case
where the incorrect case is given in the name of the package to install
rather than in a requirements file.
"""
result = script.pip(
'install', '--no-index', '-f', data.find_links, "upper",
)
# only Upper-2.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_url_incorrect_case_file_index(script, data):
"""
Same as test_url_req_case_mismatch_file_index, except testing for the case
where the incorrect case is given in the name of the package to install
rather than in a requirements file.
"""
result = script.pip(
'install', '--index-url', data.find_links3, "dinner",
expect_stderr=True,
)
    # only Dinner-2.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
@pytest.mark.network
def test_compiles_pyc(script):
"""
Test installing with --compile on
"""
del script.environ["PYTHONDONTWRITEBYTECODE"]
script.pip("install", "--compile", "--no-use-wheel", "INITools==0.2")
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "initools/__pycache__/__init__*.pyc"
)
assert any(exists)
@pytest.mark.network
def test_no_compiles_pyc(script, data):
"""
    Test installing with --no-compile on
"""
del script.environ["PYTHONDONTWRITEBYTECODE"]
script.pip("install", "--no-compile", "--no-use-wheel", "INITools==0.2")
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "initools/__pycache__/__init__*.pyc"
)
assert not any(exists)
def test_install_upgrade_editable_depending_on_other_editable(script):
script.scratch_path.join("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1')
"""))
script.pip('install', '--editable', pkga_path)
result = script.pip('list')
assert "pkga" in result.stdout
script.scratch_path.join("pkgb").mkdir()
pkgb_path = script.scratch_path / 'pkgb'
pkgb_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkgb',
version='0.1',
install_requires=['pkga'])
"""))
script.pip('install', '--upgrade', '--editable', pkgb_path)
result = script.pip('list')
assert "pkgb" in result.stdout
def test_install_topological_sort(script, data):
args = ['install', 'TopoRequires4', '-f', data.packages]
res = str(script.pip(*args, expect_error=False))
order1 = 'TopoRequires, TopoRequires2, TopoRequires3, TopoRequires4'
order2 = 'TopoRequires, TopoRequires3, TopoRequires2, TopoRequires4'
assert order1 in res or order2 in res, res
def test_install_wheel_broken(script, data):
script.pip('install', 'wheel')
res = script.pip(
'install', '--no-index', '-f', data.find_links, 'wheelbroken',
expect_stderr=True)
assert "Successfully installed wheelbroken-0.1" in str(res), str(res)
def test_install_builds_wheels(script, data):
# NB This incidentally tests a local tree + tarball inputs
# see test_install_editable_from_git_autobuild_wheel for editable
# vcs coverage.
script.pip('install', 'wheel')
to_install = data.packages.join('requires_wheelbroken_upper')
res = script.pip(
'install', '--no-index', '-f', data.find_links,
to_install, expect_stderr=True)
expected = ("Successfully installed requires-wheelbroken-upper-0"
" upper-2.0 wheelbroken-0.1")
# Must have installed it all
assert expected in str(res), str(res)
root = appdirs.user_cache_dir('pip')
wheels = []
for top, dirs, files in os.walk(root):
wheels.extend(files)
# and built wheels for upper and wheelbroken
assert "Running setup.py bdist_wheel for upper" in str(res), str(res)
assert "Running setup.py bdist_wheel for wheelb" in str(res), str(res)
# But not requires_wheel... which is a local dir and thus uncachable.
assert "Running setup.py bdist_wheel for requir" not in str(res), str(res)
    # and wheel files were written into the cache
assert wheels != [], str(res)
# and installed from the wheel
assert "Running setup.py install for upper" not in str(res), str(res)
# the local tree can't build a wheel (because we can't assume that every
# build will have a suitable unique key to cache on).
assert "Running setup.py install for requires-wheel" in str(res), str(res)
# wheelbroken has to run install
assert "Running setup.py install for wheelb" in str(res), str(res)
| habnabit/pip | tests/functional/test_install.py | Python | mit | 25,668 |
from .ppo import PPOAgent
| qsheeeeen/Self-Driving-Car | rl_toolbox/agent/__init__.py | Python | mit | 26 |
#!/usr/bin/env python3
import click
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd
prop1range = [0.0, 1.0] # VF
prop2range = [0.0, 800.0] # ML
num_ch4_a3 = 2.69015E-05 # from methane-comparison.xlsx
fsl = fs = 8
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# rc('text', usetex=True)
@click.command()
@click.argument('csv-path', type=click.File())
def figure_ml_vs_vf(csv_path):
num_bins = 40
# figure has to be a little "oversized" so that mpl makes it big enough to fill a 1-column fig.
fig = plt.figure(figsize=(4.5,4.5))
# we only want 5 colors for ch4/uc, where each color is centered at 0,1,2,3,4 +-0.5.
cm = matplotlib.cm.get_cmap("viridis",5)
points = pd.read_csv(csv_path)
points['ch4_uc'] = points.absolute_volumetric_loading * (num_ch4_a3 * points.a * points.b * points.c)
ax = fig.subplots(ncols=1)
ax.set_xlim(prop1range[0], prop1range[1])
ax.set_ylim(prop2range[0], prop2range[1])
ax.set_xticks(prop1range[1] * np.array([0.0, 0.25, 0.5, 0.75, 1.0]))
ax.set_yticks(prop2range[1] * np.array([0.0, 0.25, 0.5, 0.75, 1.0]))
ax.set_xticks(prop1range[1] * np.array(range(0,num_bins + 1))/num_bins, minor=True)
ax.set_yticks(prop2range[1] * np.array(range(0,num_bins + 1))/num_bins, minor=True)
ax.tick_params(axis='x', which='major', labelsize=fs)
ax.tick_params(axis='y', which='major', labelsize=fs)
ax.grid(which='major', axis='both', linestyle='-', color='0.9', zorder=0)
sc = ax.scatter(points.void_fraction_geo, points.absolute_volumetric_loading, zorder=2,
alpha=0.6, s=points.a, edgecolors=None, linewidths=0, c=points.ch4_uc,
cmap=cm, vmin=-0.5, vmax=4.5)
ax.set_xlabel('Void Fraction', fontsize=fsl)
ax.set_ylabel('Methane Loading [V/V]', fontsize=fsl)
# cb = fig.colorbar(sc, ax=ax)
# cb.ax.tick_params(labelsize=fs)
output_path = "figure.png"
fig.savefig(output_path, dpi=1200, bbox_inches='tight')
plt.close(fig)
if __name__ == '__main__':
figure_ml_vs_vf()
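# Example invocation (the CSV filename is illustrative):
#   python3 figure_ml_vs_vf.py loadings.csv
# The figure is written to figure.png in the current directory.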
| WilmerLab/HTSOHM-dev | analysis/figure_ml_vs_vf.py | Python | mit | 2,118 |
from drf_yasg import openapi
organization_params_in_header = openapi.Parameter(
"org", openapi.IN_HEADER, required=True, type=openapi.TYPE_INTEGER
)
organization_params = [
organization_params_in_header,
]
opportunity_list_get_params = [
organization_params_in_header,
openapi.Parameter("name", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("account", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("stage", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("lead_source", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("tags", openapi.IN_QUERY, type=openapi.TYPE_STRING),
]
opportunity_detail_get_params = [
organization_params_in_header,
openapi.Parameter(
"opportunity_attachment",
openapi.IN_QUERY,
type=openapi.TYPE_FILE,
),
openapi.Parameter("comment", openapi.IN_QUERY, type=openapi.TYPE_STRING),
]
opportunity_create_post_params = [
organization_params_in_header,
openapi.Parameter(
"name", openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING
),
openapi.Parameter("account", openapi.IN_QUERY, type=openapi.TYPE_INTEGER),
openapi.Parameter("amount", openapi.IN_QUERY, type=openapi.TYPE_INTEGER),
openapi.Parameter("currency", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("stage", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("lead_source", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("probability", openapi.IN_QUERY, type=openapi.TYPE_INTEGER),
openapi.Parameter("teams", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("assigned_to", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("contacts", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter(
"due_date", openapi.IN_QUERY, type=openapi.FORMAT_DATE, example="2021-01-13"
),
openapi.Parameter("tags", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter("description", openapi.IN_QUERY, type=openapi.TYPE_STRING),
openapi.Parameter(
"opportunity_attachment",
openapi.IN_QUERY,
type=openapi.TYPE_FILE,
),
]
opportunity_comment_edit_params = [
organization_params_in_header,
openapi.Parameter("comment", openapi.IN_QUERY, type=openapi.TYPE_STRING),
]
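# A minimal usage sketch (the view method is hypothetical; drf_yasg consumes
# these parameter lists through the manual_parameters argument of
# swagger_auto_schema):
#
#   from drf_yasg.utils import swagger_auto_schema
#
#   @swagger_auto_schema(manual_parameters=opportunity_list_get_params)
#   def get(self, request, *args, **kwargs):
#       ...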
| MicroPyramid/Django-CRM | opportunity/swagger_params.py | Python | mit | 2,381 |
"""
WSGI config for Pjs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Pjs.settings")
application = get_wsgi_application()
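# A typical serving sketch (server choice and bind address are illustrative):
#   gunicorn -b 0.0.0.0:8000 Pjs.wsgi:application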
| MSzalowski/Pjs2 | Pjs/Pjs/wsgi.py | Python | mit | 384 |
from datetime import datetime
class PanoplyException(Exception):
def __init__(self, args=None, retryable=True):
super(PanoplyException, self).__init__(args)
self.retryable = retryable
class IncorrectParamError(Exception):
    def __init__(self, msg: str = "Incorrect input parameter"):
super().__init__(msg)
class DataSourceException(Exception):
def __init__(self, message, code, exception_cls,
phase, source_type, source_id, database_id):
super().__init__(message)
self.message = message
self.code = code
self.phase = phase
self.source_type = source_type
self.source_id = source_id
self.database_id = database_id
self.exception_cls = exception_cls
self.created_at = datetime.utcnow()
class TokenValidationException(PanoplyException):
def __init__(self, original_error, args=None, retryable=True):
super().__init__(args, retryable)
self.original_error = original_error
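# A minimal usage sketch (connect_to_source is a placeholder and all field
# values are illustrative):
#
#   try:
#       connect_to_source()
#   except IOError as e:
#       raise DataSourceException(
#           message=str(e), code=500, exception_cls=type(e),
#           phase='extract', source_type='postgres',
#           source_id='source-1', database_id='db-1')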
| panoplyio/panoply-python-sdk | panoply/errors/exceptions.py | Python | mit | 1,017 |
import numpy
import os
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
if len(sys.argv) != 2:
print('Usage: python gen_bit_rate_stats.py <path/to/input_file.sjson>')
sys.exit(1)
input_sjson_file = sys.argv[1]
if not input_sjson_file.endswith('.sjson'):
print('Expected SJSON input file, found: {}'.format(input_sjson_file))
sys.exit(1)
if not os.path.exists(input_sjson_file):
print('Input file not found: {}'.format(input_sjson_file))
sys.exit(1)
with open(input_sjson_file, 'r') as file:
input_sjson_data = sjson.loads(file.read())
input_data_type_def = {
'names': ('algorithm_names', '0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'),
'formats': ('S128', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')
}
columns_to_extract = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
output_csv_file_path = 'D:\\acl-dev\\tools\\graph_generation\\bit_rates.csv'
output_csv_data = []
output_csv_headers = ['Bit Rate']
output_csv_data.append(['0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'])
for entry in input_sjson_data['inputs']:
print('Parsing {} ...'.format(entry['header']))
csv_data = numpy.loadtxt(entry['file'], delimiter=',', dtype=input_data_type_def, skiprows=1, usecols=columns_to_extract)
        row_filter = entry.get('filter', None)
        if row_filter is not None:
            best_variable_data_mask = csv_data['algorithm_names'] == bytes(row_filter, encoding='utf-8')
csv_data = csv_data[best_variable_data_mask]
# Strip algorithm name
output_csv_data.append(csv_data[0].tolist()[1:])
output_csv_headers.append(entry['header'])
output_csv_data = numpy.column_stack(output_csv_data)
with open(output_csv_file_path, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data, delimiter=',', fmt=('%s'))
| nfrechette/acl | tools/graph_generation/gen_bit_rate_stats.py | Python | mit | 2,309 |
"""
GpuCorrMM-based convolutional layers
"""
import theano
from .. import init
from .. import nonlinearities
from .base import Layer
from .conv import conv_output_length
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda.blas import GpuCorrMM
__all__ = [
"MMLayer",
"Conv2DMMLayer",
]
if not theano.config.device.startswith("gpu"):
raise ImportError("requires a GPU to work")
# base class for all layers that rely on GpuCorrMM directly
class MMLayer(Layer):
pass
class Conv2DMMLayer(MMLayer):
def __init__(self, incoming, num_filters, filter_size, strides=(1, 1),
border_mode=None, untie_biases=False, W=init.GlorotUniform(),
b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
pad=None, flip_filters=False, **kwargs):
super(Conv2DMMLayer, self).__init__(incoming, **kwargs)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = filter_size
self.strides = strides
self.untie_biases = untie_biases
self.flip_filters = flip_filters
if border_mode is not None and pad is not None:
raise RuntimeError("You cannot specify both 'border_mode' and "
"'pad'. To avoid ambiguity, please specify "
"only one of them.")
elif border_mode is None and pad is None:
# no option specified, default to valid mode
self.pad = (0, 0)
elif border_mode is not None:
if border_mode == 'valid':
self.pad = (0, 0)
elif border_mode == 'full':
self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
elif border_mode == 'same':
# only works for odd filter size, but the even filter size case
# is probably not worth supporting.
self.pad = ((self.filter_size[0] - 1) // 2,
(self.filter_size[1] - 1) // 2)
else:
raise RuntimeError("Unsupported border_mode for "
"Conv2DMMLayer: %s" % border_mode)
else:
self.pad = pad
self.W = self.create_param(W, self.get_W_shape(), name="W")
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
self.b = self.create_param(b, (num_filters, output_shape[2],
output_shape[3]), name="b")
else:
self.b = self.create_param(b, (num_filters,), name="b")
self.corr_mm_op = GpuCorrMM(subsample=self.strides, pad=self.pad)
def get_W_shape(self):
num_input_channels = self.input_shape[1]
return (self.num_filters, num_input_channels, self.filter_size[0],
self.filter_size[1])
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
batch_size = input_shape[0]
output_rows = conv_output_length(input_shape[2],
self.filter_size[0],
self.strides[0],
'pad', self.pad[0])
output_columns = conv_output_length(input_shape[3],
self.filter_size[1],
self.strides[1],
'pad', self.pad[1])
return (batch_size, self.num_filters, output_rows, output_columns)
def get_output_for(self, input, **kwargs):
filters = self.W
if self.flip_filters:
filters = filters[:, :, ::-1, ::-1] # flip top-down, left-right
contiguous_filters = gpu_contiguous(filters)
contiguous_input = gpu_contiguous(input)
conved = self.corr_mm_op(contiguous_input, contiguous_filters)
if self.b is None:
activation = conved
elif self.untie_biases:
activation = conved + self.b.dimshuffle('x', 0, 1, 2)
else:
activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
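# A minimal construction sketch (shapes are illustrative; importing this
# module at all requires the legacy theano.sandbox.cuda GPU backend):
#
#   from lasagne.layers import InputLayer
#   l_in = InputLayer((None, 3, 32, 32))
#   l_conv = Conv2DMMLayer(l_in, num_filters=16, filter_size=(3, 3),
#                          border_mode='same')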
| takacsg84/Lasagne | lasagne/layers/corrmm.py | Python | mit | 4,504 |
"""
handlers for transactional messaging service
"""
import json
# tornado imports
from tornado.queues import Queue
from tornado import websocket, gen, web
#local imports
from settings import DEBUG
#===============================================================================
# WEBSOCKETS SERVER
#===============================================================================
class messaging_server(web.Application):
"""listener application class"""
def __init__(self, q):
"""listener builder method"""
#define petition handlers to use
handlers = [
(r'/channel', channelHandler, dict(q=q)),
(r'/mirror', mirrorHandler),
]
web.Application.__init__(self, handlers)
#===============================================================================
# TESTING HANDLERS
#===============================================================================
class mirrorHandler(websocket.WebSocketHandler):
"""return to the sender the same message they sent"""
verbose = DEBUG
def open(self):
"""defines the websocket open method"""
pass
@gen.coroutine
def on_message(self, message):
"""mirror income data"""
yield self.write_message(message)
def on_close(self):
"""defines the websocket close method"""
pass
class channelHandler(websocket.WebSocketHandler):
"""class that handles app websockets communication"""
verbose = DEBUG
def initialize(self, q):
"""initialize vigilante handler"""
self.q = q
self.service_functions = {
'create_user': self.create_user,
'login': self.login_user,
'logout': self.logout_user
}
def open(self):
"""defines the websocket open method"""
print('[channel]: started connection')
@gen.coroutine
def on_message(self, message):
"""defines the response to income messages"""
data = json.loads(message)
action = data.get('action')
if action:
print(message)
self.service_functions[action](message)
else:
print('[channelHandler]: must give an action')
self.write_message(
json.dumps({'error': [0, 'there is no action in request']})
)
self.write_message(message)
def on_close(self):
"""defines the websocket close method"""
pass
def create_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
        # 1. validate that the information is complete
        #    at least name and password are required
        #    the email is also requested (work on the user DB model)
        # 2. validate that the user does not already exist
        #    check the database for the incoming user_name
        #    reply with an "already exists" message if it does
        # 3. validate that the password is acceptable
        #    at least 8 characters, with both letters and digits
        #    reply with a "bad password" message otherwise
        # 4. create the user object if all validations pass
        #    fill in defaults for any data not supplied
        # 5. store the user information
        # 6. return a response to the client
        # TODO: define the database model (christian)
        # TODO: select an ORM (edwin)
        # TODO: check whether the user exists (edwin)
        # TODO: create the user record (edwin)
        # TODO: complete the JSON data for insertion (christian)
        # TODO: password validation function (christian)
        pass
def login_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
pass
def logout_user(self, message):
        # IMPLEMENT THE SERVICE LOGIC HERE
pass
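# A minimal startup sketch (the port is illustrative):
#
#   from tornado.ioloop import IOLoop
#   q = Queue()
#   app = messaging_server(q)
#   app.listen(8888)
#   IOLoop.current().start()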
| pythonpopayan/bermoto | backend/handlers/transactional_messaging.py | Python | mit | 3,778 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TWX documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 27 15:07:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TWX'
copyright = '2015, Vince Castellano, Phillip Lopo'
author = 'Vince Castellano, Phillip Lopo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0b3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
'github_user': 'datamachine',
'github_repo': 'twx',
'description': 'Telegram Bot API and MTProto Clients',
'github_banner': True,
'github_button': True,
'show_powered_by': False,
#'link': '#0088cc',
#'sidebar_link': '#0088cc',
#'anchor': '#0088cc',
'gray_1': '#0088cc',
'gray_2': '#ecf3f8',
#'gray_3': '#0088cc',
#'pre_bg': '#ecf3f8',
#'font_family': "'Lucida Grande', 'Lucida Sans Unicode', Arial, Helvetica, Verdana, sans-serif",
#'head_font_family': "'Lucida Grande', 'Lucida Sans Unicode', Arial, Helvetica, Verdana, sans-serif"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TWXdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TWX.tex', 'TWX Documentation',
'Vince Castellano, Phillip Lopo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'twx', 'TWX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TWX', 'TWX Documentation',
author, 'TWX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| datamachine/twx | docs/conf.py | Python | mit | 10,106 |
# encoding=utf-8
from courtesy import courtesy_reply
from log_decorator import error_logging
@error_logging('na')
def response(com, answer, **kwargs):
return courtesy_reply(('na',), **kwargs)
|
tztztztztz/robot
|
response/no_answer.py
|
Python
|
mit
| 198
|
import pytest
import re
import capybara
from capybara.exceptions import ElementNotFound
class TestAssertAllOfSelectors:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
def test_does_not_raise_if_the_given_selectors_are_on_the_page(self, session):
session.assert_all_of_selectors("css", "p a#foo", "h2#h2one", "h2#h2two")
def test_raises_if_any_of_the_given_selectors_are_not_on_the_page(self, session):
with pytest.raises(ElementNotFound):
session.assert_all_of_selectors("css", "p a#foo", "h2#h2three", "h2#h2two")
def test_uses_default_selector(self, session):
capybara.default_selector = "css"
with pytest.raises(ElementNotFound):
session.assert_all_of_selectors("p a#foo", "h2#h2three", "h2#h2two")
session.assert_all_of_selectors("p a#foo", "h2#h2one", "h2#h2two")
def test_respects_scopes(self, session):
with session.scope("//p[@id='first']"):
session.assert_all_of_selectors(".//a[@id='foo']")
with pytest.raises(ElementNotFound):
session.assert_all_of_selectors(".//a[@id='red']")
def test_applies_options_to_all_locators(self, session):
session.assert_all_of_selectors("field", "normal", "additional_newline", field_type="textarea")
with pytest.raises(ElementNotFound):
session.assert_all_of_selectors("field", "normal", "test_field", "additional_newline", field_type="textarea")
@pytest.mark.requires("js")
def test_does_not_raise_error_if_all_the_elements_appear_before_given_wait_duration(self, session):
with capybara.using_wait_time(0.1):
session.visit("/with_js")
session.click_link("Click me")
session.assert_all_of_selectors("css", "a#clickable", "a#has-been-clicked", "#drag", wait=0.9)
class TestAssertNoneOfSelectors:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
def test_raises_if_any_of_the_given_selectors_are_on_the_page(self, session):
with pytest.raises(ElementNotFound):
session.assert_none_of_selectors("xpath", "//p", "//a")
with pytest.raises(ElementNotFound):
session.assert_none_of_selectors("xpath", "//abbr", "//a")
with pytest.raises(ElementNotFound):
session.assert_none_of_selectors("css", "p a#foo")
def test_does_not_raise_if_any_of_the_given_selectors_are_not_on_the_page(self, session):
session.assert_none_of_selectors("xpath", "//abbr", "//td")
session.assert_none_of_selectors("css", "p a#doesnotexist", "abbr")
def test_uses_default_selector(self, session):
capybara.default_selector = "css"
session.assert_none_of_selectors("css", "p a#doesnotexist", "abbr")
with pytest.raises(ElementNotFound):
session.assert_none_of_selectors("abbr", "p a#foo")
def test_respects_scopes(self, session):
with session.scope("//p[@id='first']"):
with pytest.raises(ElementNotFound):
session.assert_none_of_selectors(".//a[@id='foo']")
session.assert_none_of_selectors(".//a[@id='red']")
def test_applies_the_options_to_all_locators(self, session):
with pytest.raises(ElementNotFound):
session.assert_none_of_selectors("//p//a", text="Redirect")
session.assert_none_of_selectors("//p", text="Doesnotexist")
def test_discards_all_matches_where_the_given_regexp_is_matched(self, session):
with pytest.raises(ElementNotFound):
session.assert_none_of_selectors("//p//a", text=re.compile(r"re[dab]i", re.IGNORECASE), count=1)
session.assert_none_of_selectors("//p//a", text=re.compile(r"Red$"))
@pytest.mark.requires("js")
def test_does_not_find_elements_if_they_appear_after_given_wait_duration(self, session):
with capybara.using_wait_time(0.1):
session.visit("/with_js")
session.click_link("Click me")
session.assert_none_of_selectors("css", "#new_field", "a#has-been-clicked", wait=0.1)
|
elliterate/capybara.py
|
capybara/tests/session/test_assert_all_of_selectors.py
|
Python
|
mit
| 4,160
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Category', fields ['name']
db.create_unique(u'website_category', ['name'])
def backwards(self, orm):
# Removing unique constraint on 'Category', fields ['name']
db.delete_unique(u'website_category', ['name'])
models = {
u'website.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'}),
'typo': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'website.keyword': {
'Meta': {'ordering': "['codname']", 'unique_together': "(('codname', 'category'),)", 'object_name': 'Keyword'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keywords'", 'to': u"orm['website.Category']"}),
'codname': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['website']
|
LucasMagnum/pyexplain
|
pyexplain/website/migrations/0003_auto__add_unique_category_name.py
|
Python
|
mit
| 1,760
|
import glob
import os
def load_embeddings(folder):
    """Load word2vec-style text embeddings from every file under folder."""
    files = [filename for filename in glob.iglob(folder + '**', recursive=True)
             if not os.path.isdir(filename)]
    w2v = {}
    for file in files:
        with open(file, "r", encoding='utf8') as lines:
            for line in lines:
                # based on http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/
                tokens = line.split()  # split once: first token is the word, the rest the vector
                w2v[tokens[0]] = tokens[1:]
    return w2v
# Derive a document vector: scale each term's embedding by its tf-idf weight,
# sum the scaled vectors, then divide by the sum of the tf-idf weights used.
def compute_embedding_vec(embeddings, doc_tfidf):
intersect = set(doc_tfidf.keys()).intersection(embeddings.keys())
res = []
divisor = 0
for term in intersect:
_tfidf = doc_tfidf[term]
divisor += _tfidf
emb_vec = embeddings[term]
for emb in range(len(emb_vec)):
try:
res[emb] += _tfidf * float(emb_vec[emb])
except IndexError:
res.append(_tfidf * float(emb_vec[emb]))
    # "for k in res: k /= divisor" would only rebind the loop variable and
    # leave res unchanged, so rebuild the list instead
    res = [k / divisor for k in res]
    return res
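# Usage sketch (not part of the original module): toy values for illustration;
# load_embeddings would normally supply the embeddings dict.
if __name__ == '__main__':
    toy_embeddings = {
        'cat': ['0.1', '0.3'],  # vectors arrive as strings, as load_embeddings returns them
        'dog': ['0.2', '0.1'],
    }
    toy_tfidf = {'cat': 2.0, 'dog': 1.0, 'fish': 0.5}  # 'fish' has no embedding and is skipped
    # expected: (2.0*[0.1, 0.3] + 1.0*[0.2, 0.1]) / (2.0 + 1.0) ~= [0.1333, 0.2333]
    print(compute_embedding_vec(toy_embeddings, toy_tfidf))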
|
swagner-de/irws_homeworks
|
word_embeddings/embedding.py
|
Python
|
mit
| 1,203
|
# -*- coding: utf-8 -*-
import urllib2
from lxml import etree
class Scraper(object):
    # subclasses must override this attribute
    url = None
def scrape(self):
stream = self.get_stream()
doc = self.get_document(stream)
return self.do_scrape(doc)
def get_document(self, stream):
doc = etree.parse(stream, etree.HTMLParser(remove_comments=True))
return doc
def get_stream(self):
return urllib2.urlopen(self.url)
def do_scrape(self, doc):
raise NotImplementedError()
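# Minimal subclass sketch (illustration only; ExampleScraper and its URL are
# hypothetical, not part of this module): set `url` and implement do_scrape.
class ExampleScraper(Scraper):
    url = 'http://example.com/'

    def do_scrape(self, doc):
        # `doc` is the parsed lxml tree; return whatever the scrape yields
        return doc.xpath('//title/text()')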
|
csakatoku/uamobile
|
uamobile/scrapers/base.py
|
Python
|
mit
| 543
|
#!/usr/bin/env python
# coding: UTF-8
# The MIT License
#
# Copyright (c) 2011 Keita Kita
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import os
import os.path
import sqlite3
import sys
import work_recorder
# Name of column that represents month. Format is YYYY-MM.
COLUMN_MONTH = u'month'
# Name of column that represents hours.
COLUMN_HOURS = u'hours'
# Key of time that represents a month.
TIME_KEY_MONTH = COLUMN_MONTH
# Key of time that represents a project.
TIME_KEY_PROJECT = work_recorder.COLUMN_PROJECT
# Key of time that represents hours per project.
TIME_KEY_HOURS = COLUMN_HOURS
def aggregate_work_time(start_day, end_day, conn):
u"""
Aggregate work times a project.
Parameters:
start_day : Day of start. Format is YYYY-MM-DD.
end_day : Day of end. Format is YYYY-MM-DD.
conn : Connection of database.
Return:
A list of dictionary. Key is a name of project. Value is its work time.
"""
cursor = conn.execute(u"""
select strftime('%Y-%m', {day}) as {month},
{project},
round(sum(strftime('%s', {day} || ' ' || {end}) -
strftime('%s', {day} || ' ' || {start})) / 60.0 / 60.0, 2) as {hours}
from {work_time}
where {day} between :start and :end
group by {month}, {project}
order by {month} asc, {hours} desc
""".format(month = COLUMN_MONTH,
project = work_recorder.COLUMN_PROJECT,
day = work_recorder.COLUMN_DAY,
end = work_recorder.COLUMN_END,
hours = COLUMN_HOURS,
start = work_recorder.COLUMN_START,
work_time = work_recorder.TABLE_WORK_TIME),
{u'start': start_day, u'end': end_day})
return [
{TIME_KEY_MONTH: a_row[0],
TIME_KEY_PROJECT: a_row[1],
TIME_KEY_HOURS: a_row[2]}
for a_row in cursor]
def print_result(times):
previous_month = None
for a_time in times:
month = a_time[TIME_KEY_MONTH]
if month != previous_month:
            if previous_month is not None:
print
print month
previous_month = month
print u'\t%s: %s hours' % (a_time[TIME_KEY_PROJECT], a_time[TIME_KEY_HOURS])
def main():
u"""
    Aggregate work time per project.
Command line arguments:
<start day> : Day of start, format : YYYYMMDD or MMDD, MDD (required)
<end day> : Day of end, format : YYYYMMDD or MMDD, MDD (required)
"""
    parser = argparse.ArgumentParser(description = u'Aggregate work time per project.')
parser.add_argument(u'start_day',
help = u'Day of start. Format is YYYYMMDD or MMDD, MDD')
parser.add_argument(u'end_day',
help = u'Day of end. Format is YYYYMMDD or MMDD, MDD')
args = parser.parse_args()
try:
start_day = work_recorder.convert_day(args.start_day)
end_day = work_recorder.convert_day(args.end_day)
except work_recorder.InvalidArgumentFormatException:
        print u'Your arguments do not match the expected date formats.'
sys.exit(1)
database = os.path.join(os.getcwdu(), work_recorder.DATABASE_FILE_NAME)
with sqlite3.connect(database) as conn:
times = aggregate_work_time(start_day, end_day, conn)
print_result(times)
if __name__ == '__main__':
main()
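# Example invocation (assuming work_recorder's database file already exists in
# the current working directory; date formats as documented above):
#
#     $ python aggregate_work_time.py 20110101 20110331
#
# prints per-month, per-project hour totals between the two days.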
|
mikanbako/Work-Recorder
|
src/main/aggregate_work_time.py
|
Python
|
mit
| 4,369
|
from .analyzer import Pep8Analyzer
from .issues_data import issues_data
analyzers = {
'pep8' :
{
'title' : 'Pep-8',
'class' : Pep8Analyzer,
'language' : 'python',
'issues_data' : issues_data,
},
}
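# Consumption sketch (hypothetical caller, not part of this plugin): checkmate
# looks an analyzer up by key and instantiates its class, roughly:
#
#     analyzer_cls = analyzers['pep8']['class']
#     analyzer = analyzer_cls()  # real constructor arguments depend on checkmate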
|
quantifiedcode/checkmate
|
checkmate/contrib/plugins/python/pep8/setup.py
|
Python
|
mit
| 266
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
with open('readme.rst') as reader:
long_description = reader.read()
setup(name='theape',
long_description=long_description,
version= '2014.11.10',
description="The All-Purpose Experimenter.",
author="russell",
platforms=['linux'],
url = '',
author_email="necromuralist@gmail.com",
license = "MIT",
install_requires = 'pudb numpy paramiko configobj docopt'.split(),
packages = find_packages(),
include_package_data = True,
package_data = {"theape":["*.txt", "*.rst", "*.ini"]},
entry_points = """
[console_scripts]
theape=theape.main:main
[theape.subcommands]
subcommands=theape.infrastructure.arguments
[theape.plugins]
plugins = theape.plugins
"""
)
# an example last line would be cpm= cpm.main: main
# If you want to require other packages add (to setup parameters):
# install_requires = [<package>],
#version=datetime.today().strftime("%Y.%m.%d"),
# if you have an egg somewhere other than PyPi that needs to be installed as a dependency, point to a page where you can download it:
# dependency_links = ["http://<url>"]
|
rsnakamura/theape
|
setup.py
|
Python
|
mit
| 1,343
|
import pygame
from ui.utils.interpolator import Interpolator
class LcarsWidget(pygame.sprite.DirtySprite):
"""Base class for all widgets"""
def __init__(self, color, pos, size, handler=None):
pygame.sprite.DirtySprite.__init__(self)
        # subclasses may pre-set self.image; create a plain surface otherwise
        if getattr(self, 'image', None) is None:
self.image = pygame.Surface(size).convert()
self.image.fill(color)
self.rect = self.image.get_rect()
self.rect.top = pos[0]
self.rect.left = pos[1]
self.size = (self.rect.width, self.rect.height)
self.long_pressed = False
self.pressed_time = 0
self.focussed = False
self.line = None
self.handler = handler
def update(self, screen):
if not self.visible:
return
        if self.line is not None:
self.line.next()
if self.rect.center == self.line.pos:
self.dirty = 0
self.rect.center = self.line.pos
else:
self.dirty = 0
screen.blit(self.image, self.rect)
def handleEvent(self, event, clock):
handled = False
if not self.visible:
self.focussed = False
return handled
if event.type == pygame.MOUSEBUTTONDOWN:
self.pressed_time = pygame.time.get_ticks()
self.focussed = True
if event.type == pygame.MOUSEMOTION:
if (self.focussed and pygame.time.get_ticks() - self.pressed_time > 1000):
self.long_pressed = True
if self.groups()[0].UI_PLACEMENT_MODE:
self.rect.top = event.pos[1]
self.rect.left = event.pos[0]
self.dirty = 1
if event.type == pygame.MOUSEBUTTONUP:
if self.handler:
self.handler(self, event, clock)
handled = True
if self.focussed and self.long_pressed and self.groups()[0].UI_PLACEMENT_MODE:
print(event.pos[1], event.pos[0])
self.pressed_time = 0
self.long_pressed = False
self.focussed = False
return handled
def applyColour(self, colour):
"""Convert non-black areas of an image to specified colour"""
for x in range(0, self.size[0]):
for y in range(0, self.size[1]):
pixel = self.image.get_at((x, y)).r
if (pixel > 50):
self.image.set_at((x, y), colour)
class LcarsMoveToMouse(LcarsWidget):
"""For testing purposes - move a small square to last clicked position"""
def __init__(self, color):
self.image = None
LcarsWidget.__init__(self, color, (0,0), (10,10))
self.focussed = True
def handleEvent(self, event, clock):
if event.type == pygame.MOUSEBUTTONDOWN:
# move sprite to clicked location using interpolator
fps = clock.get_fps()
x, y = event.pos
self.line = Interpolator(
self.rect.center,
(x, y),
0.5, # duration of interpolation
fps, # current frames per second
1.0, # type of interpolation
0.5 # middle?
)
self.dirty = 1
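# Usage sketch (illustration only; the pygame setup around it is assumed, not
# part of this module): widgets live in a sprite group and are driven from the
# main loop via handleEvent() and update(), e.g.
#
#     widget = LcarsMoveToMouse((255, 153, 0))
#     for event in pygame.event.get():
#         widget.handleEvent(event, clock)
#     widget.update(screen)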
|
tobykurien/rpi_lcars
|
app/ui/widgets/sprite.py
|
Python
|
mit
| 3,393
|
a = (1, 2)
b = (1, 3, 5, 7, 8, 11)
print a[0]
#b[3] = 3 # error!
x1 = a[0]
y1 = a[1]
x1, y1 = a
b1, b2, b3, b4, b5, b6 = b
print b4
#b1, b2 = b # error!
a = 1, 2, 3
print a
def f():
return 1, 3
a = f()
x, y = f()
x = f()[0]
|
amiraliakbari/sharif-mabani-python
|
by-session/ta-921/j8/tuple1.py
|
Python
|
mit
| 263
|
__author__ = 'Varun Nayyar'
from Utils.MFCCArrayGen import emotions, speakers, getCorpus
from MCMC import MCMCRun
from emailAlerter import alertMe
def main2(numRuns = 100000, numMixtures = 8, speakerIndex = 6):
import time
for emotion in emotions:
start = time.ctime()
Xpoints = getCorpus(emotion, speakers[speakerIndex])
message = MCMCRun(Xpoints, emotion+"-"+speakers[speakerIndex], numRuns, numMixtures)
message += "Start time: {}\nEnd Time: {}\n".format(start, time.ctime())
message += "\nNumRuns: {}, numMixtures:{}\n ".format(numRuns, numMixtures)
message += "\nEmotion: {}, speaker:{}\n".format(emotion, speakers[speakerIndex])
alertMe(message)
if __name__ == "__main__":
for i in xrange(len(speakers)):
main2(numMixtures=8, speakerIndex=i)
|
nayyarv/MonteGMM
|
Inference/BayesInference.py
|
Python
|
mit
| 844
|
#!/usr/bin/env python
import sys
import os.path
import datetime
import subprocess
import time
"""
Run snapstream reader script for several files and output a table of match counts.
Example:
$ python run_on_dates.py eg01china.c 2014-01-01 2014-07-01
"""
def validate(date_string):
try:
datetime.datetime.strptime(date_string,"%Y-%m-%d")
except ValueError as e:
raise ValueError("Incorrect date format, should be YYYY-MM-DD")
if __name__ == "__main__":
start_time = time.time()
if len(sys.argv) != 4:
print("3 arguments are needed, file, begin date and end date")
exit(-1)
file_name = sys.argv[1]
begin_date = sys.argv[2]
end_date = sys.argv[3]
if os.path.isfile(file_name) is False:
print(file_name + " does not exist or is an invalid file")
exit(-1)
validate(begin_date)
validate(end_date)
print("Running %s from %s to %s..." % (file_name, begin_date, end_date))
data_files = ["Data/" + f for f in os.listdir("Data") if f >= begin_date and f < end_date]
data_files.sort()
full_output = open("full_output.txt","w")
print("\t".join(["dt","total_matches_cnt","matching_programs_cnt","total_programs_cnt","selected_programs_cnt"]))
os.system("gcc " + file_name)
for f in data_files:
date_string = f[5:15]
full_output.write(f + "\n" + "====================\n\n")
proc = subprocess.Popen(["./a.out",f], stdout=subprocess.PIPE)
proc_out = proc.communicate()[0].decode('utf-8')
full_output.write(proc_out)
proc_out = proc_out.split('\n')
print("\t".join([date_string]) + '\t' + proc_out[-2])
full_output.close()
print(str(time.time() - start_time) + " seconds taken")
|
ppham27/snapstream-reader
|
legacy/run_on_dates.py
|
Python
|
mit
| 1,762
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-25 16:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_type', models.CharField(choices=[('BU', 'Business'), ('PE', 'Personal')], max_length=2)),
('city', models.CharField(max_length=32, null=True)),
('country_code', models.CharField(max_length=2, null=True)),
('cc_id', models.CharField(max_length=36)),
('line1', models.CharField(max_length=100, null=True)),
('line2', models.CharField(max_length=100, null=True)),
('line3', models.CharField(max_length=100, null=True)),
('postal_code', models.CharField(max_length=10, null=True)),
('state', models.CharField(max_length=20, null=True)),
('state_code', models.CharField(max_length=2, null=True)),
('sub_postal_code', models.CharField(max_length=20, null=True)),
],
),
migrations.CreateModel(
name='ConstantContactList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cc_id', models.IntegerField()),
('status', models.CharField(choices=[('AC', 'Active'), ('HI', 'Hidden')], max_length=2)),
('name', models.CharField(max_length=48)),
('created_date', models.DateTimeField()),
('modified_date', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('confirmed', models.NullBooleanField()),
('company_name', models.CharField(max_length=100, null=True)),
('created_date', models.DateTimeField()),
('first_name', models.CharField(max_length=50, null=True)),
('middle_name', models.CharField(max_length=50, null=True)),
('last_name', models.CharField(max_length=50, null=True)),
('cc_id', models.IntegerField()),
('cc_modified_date', models.DateTimeField()),
('prefix_name', models.CharField(max_length=10, null=True)),
('job_title', models.CharField(max_length=50, null=True)),
('source', models.CharField(max_length=50, null=True)),
('status', models.CharField(choices=[('UN', 'Unconfirmed'), ('AC', 'Active'), ('OP', 'Optout'), ('RE', 'Removed'), ('NO', 'Non Subscriber')], max_length=2)),
('addresses', models.ManyToManyField(to='datacombine.Address')),
],
),
migrations.CreateModel(
name='EmailAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('confirm_status', models.CharField(choices=[('CO', 'Confirmed'), ('NC', 'No Confirmation Required')], max_length=3)),
('cc_id', models.CharField(max_length=36)),
('status', models.CharField(choices=[('UN', 'Unconfirmed'), ('AC', 'Active'), ('OP', 'Optout'), ('RE', 'Removed'), ('NO', 'Non Subscriber')], max_length=2)),
('opt_in_date', models.DateTimeField(null=True)),
('opt_out_date', models.DateTimeField(null=True)),
('email_address', models.EmailField(max_length=254)),
('opt_in_source', models.CharField(choices=[('AO', 'Action by Owner'), ('AV', 'Action by Visitor')], max_length=2)),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField()),
('cc_id', models.CharField(max_length=36)),
('modified_date', models.DateTimeField()),
('note', models.TextField()),
],
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('area_code', models.CharField(max_length=3, null=True)),
('number', models.CharField(max_length=7)),
('extension', models.CharField(max_length=7, null=True)),
],
),
migrations.CreateModel(
name='UserStatusOnCCList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('AC', 'Active'), ('HI', 'Hidden')], max_length=2)),
('cclist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='datacombine.ConstantContactList')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='datacombine.Contact')),
],
),
migrations.AddField(
model_name='contact',
name='cc_lists',
field=models.ManyToManyField(through='datacombine.UserStatusOnCCList', to='datacombine.ConstantContactList'),
),
migrations.AddField(
model_name='contact',
name='cell_phone',
field=models.ManyToManyField(related_name='_contact_cell_phone_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='email_addresses',
field=models.ManyToManyField(to='datacombine.EmailAddress'),
),
migrations.AddField(
model_name='contact',
name='fax',
field=models.ManyToManyField(related_name='_contact_fax_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='home_phone',
field=models.ManyToManyField(related_name='_contact_home_phone_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='notes',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='datacombine.Note'),
),
migrations.AddField(
model_name='contact',
name='work_phone',
field=models.ManyToManyField(related_name='_contact_work_phone_+', to='datacombine.Phone'),
),
]
|
Crimson-Star-Software/data-combine
|
datacombine/datacombine/migrations/0001_initial.py
|
Python
|
mit
| 7,003
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.conf import settings
from django.forms import SelectMultiple
import django_filters
from pdc.apps.common.filters import MultiValueFilter, NullableCharFilter
from . import models
class RPMFilter(django_filters.FilterSet):
name = MultiValueFilter()
version = MultiValueFilter()
epoch = MultiValueFilter()
release = MultiValueFilter()
arch = MultiValueFilter()
srpm_name = MultiValueFilter()
srpm_nevra = NullableCharFilter()
filename = MultiValueFilter()
compose = MultiValueFilter(name='composerpm__variant_arch__variant__compose__compose_id',
distinct=True)
linked_release = MultiValueFilter(name='linked_releases__release_id', distinct=True)
class Meta:
model = models.RPM
fields = ('name', 'version', 'epoch', 'release', 'arch', 'srpm_name',
'srpm_nevra', 'compose', 'filename', 'linked_release')
class ImageFilter(django_filters.FilterSet):
file_name = MultiValueFilter()
image_format = MultiValueFilter(name='image_format__name')
image_type = MultiValueFilter(name='image_type__name')
disc_number = MultiValueFilter()
disc_count = MultiValueFilter()
arch = MultiValueFilter()
mtime = MultiValueFilter()
size = MultiValueFilter()
implant_md5 = MultiValueFilter()
volume_id = MultiValueFilter()
md5 = MultiValueFilter()
sha1 = MultiValueFilter()
sha256 = MultiValueFilter()
compose = MultiValueFilter(name='composeimage__variant_arch__variant__compose__compose_id',
distinct=True)
class Meta:
model = models.Image
fields = ('file_name', 'image_format', 'image_type', 'disc_number',
'disc_count', 'arch', 'mtime', 'size', 'bootable',
'implant_md5', 'volume_id', 'md5', 'sha1', 'sha256')
class BuildImageFilter(django_filters.FilterSet):
if settings.WITH_BINDINGS:
component_name = django_filters.MethodFilter(action='filter_by_component_name',
widget=SelectMultiple)
else:
component_name = MultiValueFilter(name='rpms__srpm_name', distinct=True)
rpm_version = MultiValueFilter(name='rpms__version', distinct=True)
rpm_release = MultiValueFilter(name='rpms__release', distinct=True)
image_id = MultiValueFilter()
image_format = MultiValueFilter(name='image_format__name')
md5 = MultiValueFilter()
archive_build_nvr = MultiValueFilter(name='archives__build_nvr', distinct=True)
archive_name = MultiValueFilter(name='archives__name', distinct=True)
archive_size = MultiValueFilter(name='archives__size', distinct=True)
archive_md5 = MultiValueFilter(name='archives__md5', distinct=True)
release_id = MultiValueFilter(name='releases__release_id', distinct=True)
def filter_by_component_name(self, queryset, value):
from pdc.apps.bindings import models as binding_models
srpm_names = binding_models.ReleaseComponentSRPMNameMapping.objects.filter(
release_component__name__in=value).distinct().values_list('srpm_name')
if value:
if srpm_names:
return queryset.filter(rpms__srpm_name__in=srpm_names).distinct()
else:
return queryset.filter(rpms__srpm_name__in=value).distinct()
else:
return queryset
class Meta:
model = models.BuildImage
fields = ('component_name', 'rpm_version', 'rpm_release', 'image_id', 'image_format', 'md5',
'archive_build_nvr', 'archive_name', 'archive_size', 'archive_md5', 'release_id')
|
maxamillion/product-definition-center
|
pdc/apps/package/filters.py
|
Python
|
mit
| 4,125
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
# Copyright (c) 2013 Mime Consulting Ltd. <info@mimeconsulting.co.uk>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import socket
import pytest
import mock
from lars import dns
def test_from_address():
with mock.patch('tests.test_dns.dns.socket.getnameinfo') as getnameinfo:
getnameinfo.return_value = ('9.0.0.0', 0)
dns.from_address('9.0.0.0')
        getnameinfo.assert_called_with(('9.0.0.0', 0), 0)
getnameinfo.return_value = ('0.0.0.0', 0)
dns.from_address('0.0.0.0')
        getnameinfo.assert_called_with(('0.0.0.0', 0), 0)
def test_to_address():
with mock.patch('tests.test_dns.dns.socket.getaddrinfo') as getaddrinfo:
getaddrinfo.return_value = [(socket.AF_INET, 0, 0, 0, ('127.0.0.1', 0))]
assert dns.to_address('localhost') == '127.0.0.1'
getaddrinfo.return_value = [(socket.AF_INET6, 0, 0, 0, ('::1', 0, 0, 0))]
assert dns.to_address('ip6-localhost') == '::1'
# Ensure IPv4 is always preferred over IPv6, if available
getaddrinfo.return_value = [
(socket.AF_INET6, 0, 0, 0, ('::1', 0, 0, 0)),
(socket.AF_INET6, 0, 0, 0, ('::2', 0, 0, 0)),
(socket.AF_INET, 0, 0, 0, ('127.0.0.1', 0)),
]
assert dns.to_address('dualstack-localhost') == '127.0.0.1'
|
waveform80/lars
|
tests/test_dns.py
|
Python
|
mit
| 2,559
|
#! python3
'''
mcbd.py - Saves and loads pieces of text from/to the clipboard to/from a
shelf type file.
Usage: python3 mcbd.py save <keyword> - saves clipboard for keyword.
python3 mcbd.py <keyword> - loads to clipboard for keyword.
python3 mcbd.py list - loads all keywords to clipboard.
python3 mcbd.py delete <keyword> - deletes for keyword.
python3 mcbd.py delete - deletes all keywords.
'''
'''
Say you have the boring task of filling out many forms in a web page or
software with several text fields. The clipboard saves you from typing
the same text over and over again. But only one thing can be on the
clipboard at a time. If you have several different pieces of text that
you need to copy and paste, you have to keep highlighting and copying
the same few things over and over again. You can write a Python
program to keep track of multiple pieces of text.
'''
'''
Extend the multiclipboard program in this chapter so that it has a
delete <keyword> command line argument that will delete a keyword from
the shelf. Then add a delete command line argument that will delete all
keywords.
'''
import pyperclip
import shelve
import sys
import textwrap
def print_usage():
print(textwrap.dedent(
'''
Usage: python3 mcbd.py save <keyword> - saves clipboard for keyword.
python3 mcbd.py <keyword> - loads to clipboard for keyword.
python3 mcbd.py list - loads all keywords to clipboard.
python3 mcbd.py delete <keyword> - deletes for keyword.
python3 mcbd.py delete - deletes all keywords.
'''))
mcbShelf = shelve.open('mcb') # file created if not already existing
# save or delete specified keywords
if len(sys.argv) == 3:
if sys.argv[1].lower() == 'save':
mcbShelf[sys.argv[2]] = pyperclip.paste()
print('clipboard saved under keyword:', sys.argv[2])
    elif sys.argv[1].lower() == 'delete':
        if sys.argv[2] in mcbShelf:
            del mcbShelf[sys.argv[2]]
            print('deleted keyword:', sys.argv[2])
        else:
            print('no such keyword:', sys.argv[2])
# list or delete all keywords or fetch one
elif len(sys.argv) == 2:
if sys.argv[1].lower() == 'list':
pyperclip.copy(str(list(mcbShelf.keys())))
print('all keywords copied to clipboard')
elif sys.argv[1].lower() == 'delete':
mcbShelf.clear()
print('all keywords deleted')
elif sys.argv[1] in mcbShelf:
pyperclip.copy(mcbShelf[sys.argv[1]])
print('copied to clipboard for keyword:', sys.argv[1])
else:
print('no such keyword:', sys.argv[1])
print_usage()
else:
print_usage()
mcbShelf.close()
|
apaksoy/automatetheboringstuff
|
practice projects/chap 08/multiclipboard with deletion chap 8/mcbd.py
|
Python
|
mit
| 2,608
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpRetry(object):
"""HttpRetry operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head408(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 408 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/408'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch500(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 500 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/500'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get502(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 502 status code, then 200 after retry
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/502'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post503(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 503 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete503(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 503 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/503'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put504(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 504 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch504(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 504 status code, then 200 after retry
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/retry/504'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
|
sharadagarwal/autorest
|
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Http/autoresthttpinfrastructuretestservice/operations/http_retry.py
|
Python
|
mit
| 13,601
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tritonschedule documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 22 11:40:06 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tritonschedule'
copyright = '2016, tritonschedule'
author = 'tritonschedule'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'tritonschedule v'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tritonscheduledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tritonschedule.tex', 'tritonschedule Documentation',
'tritonschedule', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tritonschedule', 'tritonschedule Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tritonschedule', 'tritonschedule Documentation',
author, 'tritonschedule', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
|
brianhang/tritonscheduler
|
docs/conf.py
|
Python
|
mit
| 11,786
|
from django.conf.urls import url
from .views import ObservationListView, AddObservationView, UploadObservationsView
app_name = "observations"
urlpatterns = [
url(r"^$", ObservationListView.as_view(), name="observation_list"),
url(
r"^(?P<observer_id>\d+)/$",
ObservationListView.as_view(),
name="observation_list_by_observer",
),
url(r"^add/$", AddObservationView.as_view(), name="add_observation"),
url(
r"^add/(?P<star_id>\d+)/$",
AddObservationView.as_view(),
name="add_observation_for_star",
),
url(r"^upload/$", UploadObservationsView.as_view(), name="upload_observations"),
]
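# Reversing these routes elsewhere (sketch; assumes this urlconf is included
# under the "observations" namespace, matching app_name above):
#
#     from django.urls import reverse  # django.core.urlresolvers on older Django
#     reverse("observations:observation_list")
#     reverse("observations:add_observation_for_star", kwargs={"star_id": 42})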
|
zsiciarz/variablestars.net
|
observations/urls.py
|
Python
|
mit
| 661
|
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import (assert_equal, assert_raises, raises, assert_true,
assert_false)
try:
from nose.tools import assert_is
except ImportError:
from landlab.testing.tools import assert_is
from landlab import RasterModelGrid
class TestRasterModelGridConnectingFaces():
def setup(self):
self.rmg = RasterModelGrid(4, 5)
def test_horizontally_adjacent_cells(self):
assert_array_equal(self.rmg.get_face_connecting_cell_pair(0, 1),
np.array([10]))
def test_vertically_adjacent_cells(self):
assert_array_equal(self.rmg.get_face_connecting_cell_pair(0, 3),
np.array([3]))
def test_diagonally_adjacent_cells(self):
assert_array_equal(self.rmg.get_face_connecting_cell_pair(1, 5),
np.array([]))
def test_non_adjacent_cells(self):
assert_array_equal(self.rmg.get_face_connecting_cell_pair(0, 2),
np.array([]))
class TestRasterModelGridCellFaces():
def setup(self):
self.rmg = RasterModelGrid(4, 5)
def test_id_as_int(self):
assert_array_equal(self.rmg.cell_faces(0), np.array([0, 9, 3, 10]))
def test_id_as_array(self):
assert_array_equal(self.rmg.cell_faces(np.array([0, 1])),
np.array([[0, 9, 3, 10], [1, 10, 4, 11]]))
|
decvalts/landlab
|
landlab/grid/tests/test_raster_grid/test_faces.py
|
Python
|
mit
| 1,453
|
# Reads a number and reports whether it is "triangular" in this exercise's
# sense: the product of three consecutive integers.
resultado = 0
contador = 0
numero = int(input('Digite um numero: '))  # "Type a number"
while resultado < numero:
    resultado = (contador - 2) * (contador - 1) * contador
    if resultado == numero:
        # "The number %d is triangular. %d x %d x %d = %d"
        print('O numero %d e triangular. %d x %d x %d = %d' % (numero, (contador - 2), (contador - 1), contador, numero))
        break
    contador += 1
else:
    # the original omitted the interpolation and printed a literal '%d'
    # "The number %d is not triangular"
    print('O numero %d nao e triangular' % numero)
|
kidchenko/playground
|
python-para-zumbis/Lista 3/Desafios/exercicio1.py
|
Python
|
mit
| 398
|
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is repeated in an input source.
Possible repetitions that raise this exception are: multiple creation
using the API or in strict parsers when a section is found more than once
in a single input file, string or dictionary.
"""
def __init__(self, section, source=None, lineno=None):
msg = [repr(section), " already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": section ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Section ")
Error.__init__(self, "".join(msg))
self.section = section
self.source = source
self.lineno = lineno
self.args = (section, source, lineno)
class DuplicateOptionError(Error):
"""Raised by strict parsers when an option is repeated in an input source.
Current implementation raises this exception only when an option is found
more than once in a single file, string or dictionary.
"""
def __init__(self, section, option, source=None, lineno=None):
msg = [repr(option), " in section ", repr(section),
" already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": option ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Option ")
Error.__init__(self, "".join(msg))
self.section = section
self.option = option
self.source = source
self.lineno = lineno
self.args = (section, option, source, lineno)
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text contains invalid syntax.
Current implementation raises this exception when the source text into
which substitutions are made does not conform to the required syntax.
"""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, source=None, filename=None):
# Exactly one of `source'/`filename' arguments has to be given.
# `filename' kept for compatibility.
if filename and source:
raise ValueError("Cannot specify both `filename' and `source'. "
"Use `source'.")
elif not filename and not source:
raise ValueError("Required argument `source' not given.")
elif filename:
source = filename
Error.__init__(self, 'Source contains parsing errors: %r' % source)
self.source = source
self.errors = []
self.args = (source, )
@property
def filename(self):
"""Deprecated, use `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
return self.source
@filename.setter
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %r, line: %d\n%r' %
(filename, lineno, line))
self.source = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
# Used in parser getters to indicate the default behaviour when a specific
# option is not found is to raise an exception. Created to enable `None' as
# a valid fallback value.
_UNSET = object()
class Interpolation:
"""Dummy interpolation that passes the value through with no changes."""
def before_get(self, parser, section, option, value, defaults):
return value
def before_set(self, parser, section, option, value):
return value
def before_read(self, parser, section, option, value):
return value
def before_write(self, parser, section, option, value):
return value
class BasicInterpolation(Interpolation):
"""Interpolation as implemented in the classic ConfigParser.
The option values can contain format strings which refer to other values in
the same section, or values in the special default section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand. If a user needs to use a bare % in
a configuration file, she can escape it by writing %%. Other % usage
is considered a user error and raises `InterpolationSyntaxError'."""
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('%%', '') # escaped percent signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = parser.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(parser, option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', "
"found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
"""Advanced variant of interpolation, supports the syntax used by
`zc.buildout'. Enables interpolation between sections."""
_KEYCRE = re.compile(r"\$\{([^}]+)\}")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('$$', '') # escaped dollar signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '$' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('$')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if len(path) == 1:
opt = parser.optionxform(path[0])
v = map[opt]
elif len(path) == 2:
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw=True)
else:
raise InterpolationSyntaxError(
option, section,
"More than one ':' found: %r" % (rest,))
except (KeyError, NoSectionError, NoOptionError):
raise InterpolationMissingOptionError(
option, section, rest, ":".join(path))
if "$" in v:
self._interpolate_some(parser, opt, accum, v, sect,
dict(parser.items(sect, raw=True)),
depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
"""Deprecated interpolation used in old versions of ConfigParser.
Use BasicInterpolation or ExtendedInterpolation instead."""
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def before_get(self, parser, section, option, value, vars):
rawval = value
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
replace = functools.partial(self._interpolation_replace,
parser=parser)
value = self._KEYCRE.sub(replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
def before_set(self, parser, section, option, value):
return value
@staticmethod
def _interpolation_replace(match, parser):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, empty_lines_in_values=True,
default_section=DEFAULTSECT,
interpolation=_UNSET):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
self._proxies = self._dict()
self._proxies[default_section] = SectionProxy(self, default_section)
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
self._empty_lines_in_values = empty_lines_in_values
        self.default_section = default_section
self._interpolation = interpolation
if self._interpolation is _UNSET:
self._interpolation = self._DEFAULT_INTERPOLATION
if self._interpolation is None:
self._interpolation = Interpolation()
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except OSError:
continue
read_ok.append(filename)
return read_ok
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
If the key is not found and `fallback' is provided, it is used as
a fallback value. `None' can be provided as a `fallback' value.
If interpolation is enabled and the optional argument `raw' is False,
all interpolations are expanded in the return values.
Arguments `raw', `vars', and `fallback' are keyword only.
The section DEFAULT is special.
"""
try:
d = self._unify_values(section, vars)
except NoSectionError:
if fallback is _UNSET:
raise
else:
return fallback
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
if fallback is _UNSET:
raise NoOptionError(option, section)
else:
return fallback
if raw or value is None:
return value
else:
return self._interpolation.before_get(self, section, option, value,
d)
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, int, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, float, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, self._convert_to_boolean, option,
raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
self._join_multiline_values()
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
def get(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.get(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getint(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getint(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getfloat(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getboolean(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
fallback=fallback)
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
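if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module: a minimal
    # demo of the API described in the module docstring, run only when the
    # file is executed directly.
    demo = ConfigParser()
    demo.read_string(
        "[server]\n"
        "host = localhost\n"
        "port = 8080\n"
    )
    assert demo.get('server', 'host') == 'localhost'
    assert demo.getint('server', 'port') == 8080
    # A `fallback' value avoids NoOptionError for a missing option.
    assert demo.getboolean('server', 'debug', fallback=False) is False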
=======
"""Configuration file parser.
A configuration file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary or intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is repeated in an input source.
Possible repetitions that raise this exception are: multiple creation
using the API or in strict parsers when a section is found more than once
in a single input file, string or dictionary.
"""
def __init__(self, section, source=None, lineno=None):
msg = [repr(section), " already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": section ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Section ")
Error.__init__(self, "".join(msg))
self.section = section
self.source = source
self.lineno = lineno
self.args = (section, source, lineno)
class DuplicateOptionError(Error):
"""Raised by strict parsers when an option is repeated in an input source.
Current implementation raises this exception only when an option is found
more than once in a single file, string or dictionary.
"""
def __init__(self, section, option, source=None, lineno=None):
msg = [repr(option), " in section ", repr(section),
" already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": option ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Option ")
Error.__init__(self, "".join(msg))
self.section = section
self.option = option
self.source = source
self.lineno = lineno
self.args = (section, option, source, lineno)
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text contains invalid syntax.
Current implementation raises this exception when the source text into
which substitutions are made does not conform to the required syntax.
"""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, source=None, filename=None):
# Exactly one of `source'/`filename' arguments has to be given.
# `filename' kept for compatibility.
if filename and source:
raise ValueError("Cannot specify both `filename' and `source'. "
"Use `source'.")
elif not filename and not source:
raise ValueError("Required argument `source' not given.")
elif filename:
source = filename
Error.__init__(self, 'Source contains parsing errors: %r' % source)
self.source = source
self.errors = []
self.args = (source, )
@property
def filename(self):
"""Deprecated, use `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
return self.source
@filename.setter
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %r, line: %d\n%r' %
(filename, lineno, line))
self.source = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
# Used in parser getters to indicate the default behaviour when a specific
# option is not found it to raise an exception. Created to enable `None' as
# a valid fallback value.
_UNSET = object()
class Interpolation:
"""Dummy interpolation that passes the value through with no changes."""
def before_get(self, parser, section, option, value, defaults):
return value
def before_set(self, parser, section, option, value):
return value
def before_read(self, parser, section, option, value):
return value
def before_write(self, parser, section, option, value):
return value
class BasicInterpolation(Interpolation):
"""Interpolation as implemented in the classic ConfigParser.
The option values can contain format strings which refer to other values in
the same section, or values in the special default section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand. If a user needs to use a bare % in
a configuration file, she can escape it by writing %%. Other % usage
is considered a user error and raises `InterpolationSyntaxError'."""
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('%%', '') # escaped percent signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = parser.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(parser, option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', "
"found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
"""Advanced variant of interpolation, supports the syntax used by
`zc.buildout'. Enables interpolation between sections."""
_KEYCRE = re.compile(r"\$\{([^}]+)\}")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('$$', '') # escaped dollar signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '$' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('$')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if len(path) == 1:
opt = parser.optionxform(path[0])
v = map[opt]
elif len(path) == 2:
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw=True)
else:
raise InterpolationSyntaxError(
option, section,
"More than one ':' found: %r" % (rest,))
except (KeyError, NoSectionError, NoOptionError):
raise InterpolationMissingOptionError(
option, section, rest, ":".join(path))
if "$" in v:
self._interpolate_some(parser, opt, accum, v, sect,
dict(parser.items(sect, raw=True)),
depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
"""Deprecated interpolation used in old versions of ConfigParser.
Use BasicInterpolation or ExtendedInterpolation instead."""
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def before_get(self, parser, section, option, value, vars):
rawval = value
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
replace = functools.partial(self._interpolation_replace,
parser=parser)
value = self._KEYCRE.sub(replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
def before_set(self, parser, section, option, value):
return value
@staticmethod
def _interpolation_replace(match, parser):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, empty_lines_in_values=True,
default_section=DEFAULTSECT,
interpolation=_UNSET):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
self._proxies = self._dict()
self._proxies[default_section] = SectionProxy(self, default_section)
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
self._empty_lines_in_values = empty_lines_in_values
self.default_section=default_section
self._interpolation = interpolation
if self._interpolation is _UNSET:
self._interpolation = self._DEFAULT_INTERPOLATION
if self._interpolation is None:
self._interpolation = Interpolation()
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except OSError:
continue
read_ok.append(filename)
return read_ok
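    # A minimal usage sketch (file names are hypothetical): read() silently
    # skips unopenable files and returns the ones it actually parsed, with
    # later files overriding earlier ones.
    #
    #     parser = ConfigParser()
    #     found = parser.read(['/etc/app.cfg', 'app.cfg'])
    #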
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
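    # Sketch of read_dict() input (section/option names are hypothetical):
    # section names map to dicts of options, and every key and value is
    # stringified on the way in.
    #
    #     parser.read_dict({'server': {'host': 'localhost', 'port': 8080}})
    #     parser.get('server', 'port')  # -> '8080' (stored as a string)
    #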
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
If the key is not found and `fallback' is provided, it is used as
a fallback value. `None' can be provided as a `fallback' value.
If interpolation is enabled and the optional argument `raw' is False,
all interpolations are expanded in the return values.
Arguments `raw', `vars', and `fallback' are keyword only.
The section DEFAULT is special.
"""
try:
d = self._unify_values(section, vars)
except NoSectionError:
if fallback is _UNSET:
raise
else:
return fallback
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
if fallback is _UNSET:
raise NoOptionError(option, section)
else:
return fallback
if raw or value is None:
return value
else:
return self._interpolation.before_get(self, section, option, value,
d)
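    # Fallback behaviour at a glance (names are hypothetical):
    #
    #     parser.get('server', 'missing', fallback='default')  # -> 'default'
    #     parser.get('server', 'missing')  # -> raises NoOptionError
    #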
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, int, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, float, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, self._convert_to_boolean, option,
raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
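    # getboolean() recognises exactly the keys of BOOLEAN_STATES,
    # case-insensitively: e.g. a stored value of 'Off' converts to False,
    # while anything outside that table raises ValueError.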
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
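    # optionxform() lower-cases option names on both read and lookup.
    # Overriding it is the supported way to make a parser case-sensitive:
    #
    #     parser = RawConfigParser()
    #     parser.optionxform = str  # keep option names exactly as written
    #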
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
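    # Round-trip sketch: write() emits the DEFAULT section first, then one
    # "[section]" block per section, using the first configured delimiter.
    #
    #     import io
    #     buf = io.StringIO()
    #     parser.write(buf)  # buf.getvalue() now holds the .ini text
    #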
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
self._join_multiline_values()
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
def get(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.get(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getint(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getint(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getfloat(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getboolean(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
fallback=fallback)
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
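# Mapping-protocol sketch (section/option names are hypothetical): proxies
# returned by parser[...] behave like dicts over a single section.
#
#     parser['server'] = {'host': 'localhost'}
#     parser['server']['host']    # -> 'localhost'
#     'host' in parser['server']  # -> True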
|
ArcherSys/ArcherSys
|
Lib/configparser.py
|
Python
|
mit
| 148,451
|
from typing import Dict, List
from service.ws_re.register._base import Register
from service.ws_re.register.author import Author
from service.ws_re.register.authors import Authors
from service.ws_re.register.lemma import Lemma
from service.ws_re.register.register_types.volume import VolumeRegister
class PublicDomainRegister(Register):
def __init__(self,
year: int,
authors: Authors,
registers: Dict[str, VolumeRegister]):
super().__init__()
self.year: int = year
self._authors: Authors = authors
self._registers = registers
self._pd_authors: List[Author] = self._get_pd_authors()
self._init_lemmas()
def __repr__(self):
return f"<{self.__class__.__name__} - year:{self.year}, lemmas:{len(self)}>"
def __len__(self):
return len(self.squash_lemmas(self._lemmas))
def __getitem__(self, item: int) -> Lemma:
return self._lemmas[item]
def _init_lemmas(self):
lemmas = []
for volume_str in self._registers:
for lemma in self._registers[volume_str].lemmas:
if self._is_lemma_of_author(lemma):
lemmas.append(lemma)
self._lemmas = sorted(lemmas, key=lambda k: (k.sort_key, k.volume.sort_key))
def _get_pd_authors(self) -> List[Author]:
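        # Assumption, not stated in the source: works enter the public domain
        # 70 years after the author's death, so the authors relevant in
        # self.year died in self.year - 71; when the death year is unknown,
        # a birth year of self.year - 171 serves as a conservative proxy
        # (a presumed 100-year lifespan).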
author_list = []
for author in self._authors:
if author.death:
if author.death == self.year - 71:
author_list.append(author)
continue
if author.birth == self.year - 171:
author_list.append(author)
return author_list
def _is_lemma_of_author(self, lemma: Lemma) -> bool:
for chapter in lemma.chapters:
if chapter.author:
authors_of_lemma = self._authors.get_author_by_mapping(chapter.author, lemma.volume.name)
for author in self._pd_authors:
if author in authors_of_lemma:
return True
return False
def _get_table(self) -> str:
header = """{|class="wikitable sortable"
!Artikel
!Band
!Status
!Wikilinks
!Seite
!Autor
!Sterbejahr"""
table = [header]
for lemmas in self.squash_lemmas(self._lemmas):
chapter_sum = 0
table_rows = []
lemma = None
for lemma in lemmas:
                # if there are no chapters ... one line must be added no matter what
chapter_sum += max(len(lemma.chapters), 1)
table_rows.append(lemma.get_table_row(print_volume=True))
            # strip the leading "|-\n" from the first row; it is replaced later by the lemma line
table_rows[0] = table_rows[0][3:]
if chapter_sum > 1:
table.append(f"|-\n|rowspan={chapter_sum} data-sort-value=\"{lemma.sort_key}\"|{lemma.get_link()}")
else:
table.append(f"|-\n|data-sort-value=\"{lemma.sort_key}\"|{lemma.get_link()}")
table += table_rows
table.append("|}")
return "\n".join(table)
def get_register_str(self) -> str:
return f"{self._get_table()}\n[[Kategorie:RE:Register|!]]"
|
the-it/WS_THEbotIT
|
service/ws_re/register/register_types/public_domain.py
|
Python
|
mit
| 3,225
|
#!/usr/bin/python2.7
from setuptools import setup, find_packages
setup(
name='bftest_cli',
version='0.1a',
description='A wrapper to manage docker instances',
url='https://github.com/wnormandin/bftest_cli',
author='wnormandin',
author_email='bill@pokeybill.us',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7'
],
packages=find_packages(),
install_requires=['docker','click'],
py_modules=['bftest_cli'],
entry_points="""
[console_scripts]
dockcli=cli.dockcli:default
""",
)
|
wnormandin/bftest_cli
|
setup.py
|
Python
|
mit
| 762
|
from __future__ import print_function
from stt_watson.SttWatsonAbstractListener import SttWatsonAbstractListener
class SttWatsonLogListener(SttWatsonAbstractListener):
    def __init__(self):
        pass
    def listenHypothesis(self, hypothesis):
        print("Hypothesis: {0}".format(hypothesis))
    def listenPayload(self, payload):
        print(u"Text message received: {0}".format(payload))
    def listenInterimHypothesis(self, interimHypothesis):
        print("Interim hypothesis: {0}".format(interimHypothesis))
|
HomeHabbit/stt-watson
|
stt_watson/SttWatsonLogListener.py
|
Python
|
mit
| 490
|
from __future__ import annotations
from ..typecheck import *
from ..import ui
from ..import dap
from . import css
from .breakpoints_panel import BreakpointsPanel
from .input_list_view import InputListView
if TYPE_CHECKING:
from ..debugger import Debugger
class DebuggerPanel(ui.div):
on_settings: Callable[[], Any]
on_start: Callable[[], Any]
on_stop: Callable[[], Any]
on_pause: Callable[[], Any]
on_continue: Callable[[], Any]
on_step_over: Callable[[], Any]
on_step_out: Callable[[], Any]
on_step_in: Callable[[], Any]
def __init__(self, debugger: Debugger, on_navigate_to_source: Callable[[dap.SourceLocation], None]) -> None:
super().__init__()
self.debugger = debugger
self.breakpoints = BreakpointsPanel(debugger.breakpoints, on_navigate_to_source)
self.debugger.on_session_state_updated.add(lambda session, state: self.dirty())
self.debugger.on_session_active.add(self.on_selected_session)
self.debugger.on_session_added.add(self.on_selected_session)
self.last_active_adapter = None
def on_selected_session(self, session: dap.Session):
self.last_active_adapter = session.adapter_configuration
self.dirty()
def render(self) -> ui.div.Children:
items = [
DebuggerCommandButton(self.on_settings, ui.Images.shared.settings, 'Settings'),
DebuggerCommandButton(self.on_start, ui.Images.shared.play, 'Start'),
]
if self.debugger.is_stoppable():
items.append(DebuggerCommandButton(self.on_stop, ui.Images.shared.stop, 'Stop'))
else:
items.append(DebuggerCommandButton(self.on_stop, ui.Images.shared.stop_disable, 'Stop (Disabled)'))
if self.debugger.is_running():
items.append(DebuggerCommandButton(self.on_pause, ui.Images.shared.pause, 'Pause'))
elif self.debugger.is_paused():
items.append(DebuggerCommandButton(self.on_continue, ui.Images.shared.resume, 'Continue'))
else:
items.append(DebuggerCommandButton(self.on_pause, ui.Images.shared.pause_disable, 'Pause (Disabled)'))
if self.debugger.is_paused():
items.extend([
DebuggerCommandButton(self.on_step_over, ui.Images.shared.down, 'Step Over'),
DebuggerCommandButton(self.on_step_out, ui.Images.shared.left, 'Step Out'),
DebuggerCommandButton(self.on_step_in, ui.Images.shared.right, 'Step In'),
])
else:
items.extend([
DebuggerCommandButton(self.on_step_over, ui.Images.shared.down_disable, 'Step Over (Disabled)'),
DebuggerCommandButton(self.on_step_out, ui.Images.shared.left_disable, 'Step Out (Disabled)'),
DebuggerCommandButton(self.on_step_in, ui.Images.shared.right_disable, 'Step In (Disabled)'),
])
# looks like
# current status
# breakpoints ...
if self.debugger.is_active:
self.last_active_adapter = self.debugger.active.adapter_configuration or self.last_active_adapter
panel_items: list[ui.div] = []
if self.debugger.is_active:
session = self.debugger.active
status = session.status
if status:
panel_items.append(ui.div(height=css.row_height)[
ui.text(status, css=css.label_secondary)
])
if self.last_active_adapter:
settings = self.last_active_adapter.settings(self.debugger)
for setting in settings:
panel_items.append(InputListView(setting))
div = self.last_active_adapter.ui(self.debugger)
if div: panel_items.append(div)
panel_items.append(self.breakpoints)
return [
ui.div()[
ui.div(height=css.header_height)[items],
ui.div(width=30 - css.rounded_panel.padding_width, height=1000, css=css.rounded_panel)[
panel_items
],
]
]
class DebuggerCommandButton (ui.span):
def __init__(self, callback: Callable[[], Any], image: ui.Image, title: str) -> None:
super().__init__()
self.image = image
self.callback = callback
self.title = title
def render(self) -> ui.span.Children:
return [
ui.span(css=css.padding)[
ui.click(self.callback, title=self.title)[
ui.icon(self.image),
]
]
]
|
dmilith/SublimeText3-dmilith
|
Packages/Debugger/modules/views/debugger_panel.py
|
Python
|
mit
| 3,906
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Acrisel LTD
# Copyright (C) 2008- Acrisel (acrisel.com) . All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import time
from acris import resource_pool as rp
from acris import threaded
import queue
from datetime import datetime
from acris import traced_method
traced=traced_method(print, True)
class MyResource1(rp.Resource): pass
class MyResource2(rp.Resource): pass
rp1=rp.ResourcePool('RP1', resource_cls=MyResource1, policy={'resource_limit': 2, }).load()
rp2=rp.ResourcePool('RP2', resource_cls=MyResource2, policy={'resource_limit': 1, }).load()
class Callback(object):
def __init__(self, notify_queue):
self.q=notify_queue
def __call__(self, ticket=None):
self.q.put(ticket)
@threaded
def worker_callback(name, rp):
print('[ %s ] %s getting resource' % (str(datetime.now()), name))
notify_queue=queue.Queue()
callback=Callback(notify_queue)
r=rp.get(callback=callback)
if not r:
print('[ %s ] %s doing work before resource available' % (str(datetime.now()), name,))
print('[ %s ] %s waiting for resources' % (str(datetime.now()), name,))
ticket=notify_queue.get()
r=rp.get(ticket=ticket)
print('[ %s ] %s doing work (%s)' % (str(datetime.now()), name, repr(r)))
time.sleep(2)
print('[ %s ] %s returning (%s)' % (str(datetime.now()), name, repr(r)))
rp.put(*r)
r1=worker_callback('>>> w11-callback', rp1)
r2=worker_callback('>>> w21-callback', rp2)
r3=worker_callback('>>> w22-callback', rp2)
r4=worker_callback('>>> w12-callback', rp1)
|
Acrisel/acris
|
acris/acris_example/resource_pool_callback.py
|
Python
|
mit
| 2,420
|
from kaleidoscope.globals import SecType
class OrderLeg(object):
def __init__(self, quantity, contract):
"""
This class is an abstraction of an order leg of an option strategy. It holds the information
for a single order leg as part of an entire option strategy.
"""
self.quantity = quantity
self.contract = contract
def reverse(self):
""" reverse the the position by negating the quantity """
self.quantity *= -1
class OptionLeg(OrderLeg):
""" Holds information of an option leg """
def __init__(self, option, quantity):
self.sec_type = SecType.OPT
super().__init__(quantity, option)
class StockLeg(OrderLeg):
""" Holds information of an stock leg """
def __init__(self, symbol, quantity):
self.sec_type = SecType.STK
super().__init__(quantity, symbol)
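# Usage sketch (assumes the kaleidoscope package is importable; the contract
# string is a placeholder for a real option contract object, hence commented out):
# from kaleidoscope.options.order_leg import OptionLeg
# leg = OptionLeg("SPX_2100_C", quantity=1)  # long one contract
# leg.reverse()
# assert leg.quantity == -1  # now the short side of the same contract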
|
michaelchu/kaleidoscope
|
kaleidoscope/options/order_leg.py
|
Python
|
mit
| 884
|
import pytest
from apispec import yaml_utils
def test_load_yaml_from_docstring():
def f():
"""
Foo
bar
baz quux
---
herp: 1
derp: 2
"""
result = yaml_utils.load_yaml_from_docstring(f.__doc__)
assert result == {"herp": 1, "derp": 2}
@pytest.mark.parametrize("docstring", (None, "", "---"))
def test_load_yaml_from_docstring_empty_docstring(docstring):
assert yaml_utils.load_yaml_from_docstring(docstring) == {}
@pytest.mark.parametrize("docstring", (None, "", "---"))
def test_load_operations_from_docstring_empty_docstring(docstring):
assert yaml_utils.load_operations_from_docstring(docstring) == {}
def test_dict_to_yaml_unicode():
assert yaml_utils.dict_to_yaml({"가": "나"}) == '"\\uAC00": "\\uB098"\n'
assert yaml_utils.dict_to_yaml({"가": "나"}, {"allow_unicode": True}) == "가: 나\n"
|
marshmallow-code/apispec
|
tests/test_yaml_utils.py
|
Python
|
mit
| 908
|
import unittest
import os
os.environ['MIAMI_ENV'] = 'test'
import simplejson as json
from miami.models import Team, User, Task
class ModelsTest(unittest.TestCase):
def test_team_toJSON(self):
team = Team('Log')
team.id = 1
self.assertEquals({'id':1, 'name': 'Log', 'color': '2a33d8'}, team.toJSON())
def test_user_toJSON(self):
user = User('Mike')
self.assertEquals({'name': 'Mike', 'gravater': '91f376c4b36912e5075b6170d312eab5'}, user.toJSON())
def test_task_toJSON(self):
team = Team('Log')
team.id = 1
task = Task('title1', 'detail', status='DONE', price=1, estimate=4, team=team)
task.id = 1
task.owner = User('Mike')
self.assertEquals({'id': 1, 'title': 'title1', 'detail': 'detail', 'status': 'DONE', 'price': 1, 'estimate': 4,'priority': 100,'time_slots': [], 'consuming': '0','created_time': 'just now', 'last_updated': 'just now', 'team': {
'name': 'Log', 'color': '2a33d8', 'id':1}, 'owner': {'name': 'Mike', 'gravater': '91f376c4b36912e5075b6170d312eab5'}, 'partner': {}}, task.toJSON())
|
archiechen/miami
|
tests/models_tests.py
|
Python
|
mit
| 1,133
|
"""
Global Settings
"""
settings = {}
# try:
# conf = __import__("django.conf")
# settings = conf.conf.settings
# except ImportError:
# settings = {}
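# NOTE: while the Django import above stays commented out, `settings` is a
# plain dict, and getattr() on a dict instance does not consult its keys --
# every getattr() call below therefore returns its default value.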
# Broker - amqp,
BROKER_URL = getattr(settings, "BROKER_URL", "amqp://guest@127.0.0.1//")
# file storage path (default: memory), e.g. /tmp/tracker.db
CELERY_TRACKER_STORAGE = getattr(settings, "CELERY_TRACKER_STORAGE", "")
# Log level
CELERY_TRACKER_LOG_LEVEL = getattr(settings, "CELERY_TRACKER_LOG_LEVEL", "INFO")
# plugins
CELERY_TRACKER_PLUGINS = getattr(settings, "CELERY_TRACKER_PLUGINS", {
"fluent": {
"class": "tracker.plugins.fluent.FluentPlugin",
"verbose": 0,
"interval": 20,
"tag": "celery.tracker",
"host": "127.0.0.1",
"port": 24224
},
"zabbix": {
"class": "tracker.plugins.zabbix.ZabbixPlugin",
"verbose": 0,
"interval": 20,
"tag": "celery.tracker",
"host": "127.0.0.1",
"port": 10051,
"metrics": [
{"host": "celery-agent"},
]
},
"receive": {
"class": "tracker.plugins.receive.ReceivePlugin",
"verbose": 0,
"tag": "celery.tracker",
"host": "0.0.0.0",
"port": 27015,
},
#"logging": {
# "class": "tracker.plugins.logging.LoggingPlugin",
# "tag": "celery.tracker",
# "interval": 10,
# "verbose": True
#},
})
|
ikeikeikeike/celery-tracker
|
tracker/configs/celeryconfig.py
|
Python
|
mit
| 1,416
|
my_file = open("/tmp/my_file", "w")
my_file.write("Test string")
my_file.close()
my_file = open("/tmp/my_file", "r")
content = my_file.read()
my_file.close()
if (content == "Test string"):
print("OK")
else:
print("KO")
|
Nakrez/RePy
|
tests/parser/good/file.py
|
Python
|
mit
| 229
|
"""Second module, imported as inspect_recursive.a, with no contents"""
import inspect_recursive.first as a
import sys
if sys.version_info >= (3, 7):
# For some reason 3.6 says second doesn't exist yet. I get that, it's a
# cyclic reference, but that works in 3.7.
import inspect_recursive.second as b
from inspect_recursive import Foo as Bar
|
mosra/m.css
|
documentation/test_python/inspect_recursive/inspect_recursive/second.py
|
Python
|
mit
| 358
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError, ServiceRequestError
import pytest
from azure.core.rest import HttpRequest
from azure.core.exceptions import StreamClosedError, StreamConsumedError, ResponseNotReadError
@pytest.mark.asyncio
async def test_iter_raw(client):
request = HttpRequest("GET", "/streams/basic")
async with client.send_request(request, stream=True) as response:
raw = b""
async for part in response.iter_raw():
raw += part
assert raw == b"Hello, world!"
@pytest.mark.asyncio
async def test_iter_raw_on_iterable(client):
request = HttpRequest("GET", "/streams/iterable")
async with client.send_request(request, stream=True) as response:
raw = b""
async for part in response.iter_raw():
raw += part
assert raw == b"Hello, world!"
@pytest.mark.asyncio
async def test_iter_with_error(client):
request = HttpRequest("GET", "/errors/403")
async with client.send_request(request, stream=True) as response:
try:
response.raise_for_status()
except HttpResponseError as e:
pass
assert response.is_closed
try:
async with client.send_request(request, stream=True) as response:
response.raise_for_status()
except HttpResponseError as e:
pass
assert response.is_closed
request = HttpRequest("GET", "http://doesNotExist")
with pytest.raises(ServiceRequestError):
async with (await client.send_request(request, stream=True)):
raise ValueError("Should error before entering")
assert response.is_closed
@pytest.mark.asyncio
async def test_iter_bytes(client):
request = HttpRequest("GET", "/streams/basic")
async with client.send_request(request, stream=True) as response:
raw = b""
async for chunk in response.iter_bytes():
assert response.is_stream_consumed
assert not response.is_closed
raw += chunk
assert response.is_stream_consumed
assert response.is_closed
assert raw == b"Hello, world!"
@pytest.mark.skip(reason="We've gotten rid of iter_text for now")
@pytest.mark.asyncio
async def test_iter_text(client):
request = HttpRequest("GET", "/basic/string")
async with client.send_request(request, stream=True) as response:
content = ""
async for part in response.iter_text():
content += part
assert content == "Hello, world!"
@pytest.mark.skip(reason="We've gotten rid of iter_lines for now")
@pytest.mark.asyncio
async def test_iter_lines(client):
request = HttpRequest("GET", "/basic/lines")
async with client.send_request(request, stream=True) as response:
content = []
async for part in response.iter_lines():
content.append(part)
assert content == ["Hello,\n", "world!"]
@pytest.mark.asyncio
async def test_streaming_response(client):
request = HttpRequest("GET", "/streams/basic")
async with client.send_request(request, stream=True) as response:
assert response.status_code == 200
assert not response.is_closed
content = await response.read()
assert content == b"Hello, world!"
assert response.content == b"Hello, world!"
assert response.is_closed
@pytest.mark.asyncio
async def test_cannot_read_after_stream_consumed(port, client):
request = HttpRequest("GET", "/streams/basic")
async with client.send_request(request, stream=True) as response:
content = b""
async for chunk in response.iter_bytes():
content += chunk
with pytest.raises(StreamConsumedError) as ex:
await response.read()
assert "<HttpRequest [GET], url: 'http://localhost:{}/streams/basic'>".format(port) in str(ex.value)
assert "You have likely already consumed this stream, so it can not be accessed anymore" in str(ex.value)
@pytest.mark.asyncio
async def test_cannot_read_after_response_closed(port, client):
request = HttpRequest("GET", "/streams/basic")
async with client.send_request(request, stream=True) as response:
pass
with pytest.raises(StreamClosedError) as ex:
await response.read()
assert "<HttpRequest [GET], url: 'http://localhost:{}/streams/basic'>".format(port) in str(ex.value)
assert "can no longer be read or streamed, since the response has already been closed" in str(ex.value)
@pytest.mark.asyncio
async def test_decompress_plain_no_header(client):
# thanks to Xiang Yan for this test!
account_name = "coretests"
url = "https://{}.blob.core.windows.net/tests/test.txt".format(account_name)
request = HttpRequest("GET", url)
async with client:
response = await client.send_request(request, stream=True)
with pytest.raises(ResponseNotReadError):
response.content
await response.read()
assert response.content == b"test"
@pytest.mark.asyncio
async def test_compress_plain_no_header(client):
# thanks to Xiang Yan for this test!
account_name = "coretests"
url = "https://{}.blob.core.windows.net/tests/test.txt".format(account_name)
request = HttpRequest("GET", url)
async with client:
response = await client.send_request(request, stream=True)
iter = response.iter_raw()
data = b""
async for d in iter:
data += d
assert data == b"test"
@pytest.mark.asyncio
async def test_iter_read_back_and_forth(client):
# thanks to McCoy Patiño for this test!
# while this test may look like it's exposing buggy behavior, this is httpx's behavior
    # the reason the code flow is like this is that the 'iter_x' functions don't
    # actually read the contents into the response, they output them. Once they're yielded,
    # the stream is closed, so you have to catch the output as you iterate through it
request = HttpRequest("GET", "/basic/string")
async with client.send_request(request, stream=True) as response:
async for part in response.iter_bytes():
assert part
with pytest.raises(ResponseNotReadError):
response.text()
with pytest.raises(StreamConsumedError):
await response.read()
with pytest.raises(ResponseNotReadError):
response.text()
@pytest.mark.asyncio
async def test_stream_with_return_pipeline_response(client):
request = HttpRequest("GET", "/basic/string")
pipeline_response = await client.send_request(request, stream=True, _return_pipeline_response=True)
assert hasattr(pipeline_response, "http_request")
assert hasattr(pipeline_response.http_request, "content")
assert hasattr(pipeline_response, "http_response")
assert hasattr(pipeline_response, "context")
parts = []
async for part in pipeline_response.http_response.iter_bytes():
parts.append(part)
assert parts == [b'Hello, world!']
await client.close()
@pytest.mark.asyncio
async def test_error_reading(client):
request = HttpRequest("GET", "/errors/403")
async with client.send_request(request, stream=True) as response:
await response.read()
assert response.content == b""
response.content
response = await client.send_request(request, stream=True)
with pytest.raises(HttpResponseError):
response.raise_for_status()
await response.read()
assert response.content == b""
await client.close()
@pytest.mark.asyncio
async def test_pass_kwarg_to_iter_bytes(client):
request = HttpRequest("GET", "/basic/string")
response = await client.send_request(request, stream=True)
async for part in response.iter_bytes(chunk_size=5):
assert part
@pytest.mark.asyncio
async def test_pass_kwarg_to_iter_raw(client):
request = HttpRequest("GET", "/basic/string")
response = await client.send_request(request, stream=True)
async for part in response.iter_raw(chunk_size=5):
assert part
@pytest.mark.asyncio
async def test_decompress_compressed_header(client):
# expect plain text
request = HttpRequest("GET", "/encoding/gzip")
response = await client.send_request(request)
content = await response.read()
assert content == b"hello world"
assert response.content == content
assert response.text() == "hello world"
@pytest.mark.asyncio
async def test_decompress_compressed_header_stream(client):
# expect plain text
request = HttpRequest("GET", "/encoding/gzip")
response = await client.send_request(request, stream=True)
content = await response.read()
assert content == b"hello world"
assert response.content == content
assert response.text() == "hello world"
@pytest.mark.asyncio
async def test_decompress_compressed_header_stream_body_content(client):
# expect plain text
request = HttpRequest("GET", "/encoding/gzip")
response = await client.send_request(request, stream=True)
await response.read()
content = response.content
assert content == response.body()
|
Azure/azure-sdk-for-python
|
sdk/core/azure-core/tests/async_tests/test_rest_stream_responses_async.py
|
Python
|
mit
| 9,371
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the KombatKoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "KombatKoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
kombatcoin/KBK1
|
share/qt/clean_mac_info_plist.py
|
Python
|
mit
| 901
|
# -*- coding: utf-8 -*-
import logging
import os
from django.conf import settings
from django.core.management import BaseCommand
from django.apps import apps
from django.core.management import CommandError
from django.template.defaultfilters import slugify
from django.template.loader import get_template
from django.db import models
logger = logging.getLogger(__name__)
class BaseGenerator(object):
template_names = []
def __init__(self, context, path):
self.context = context
self.path = path
def get_destination(self, template_name, app_name="", model_name=""):
destination = self.path + template_name.replace(
'scaffold/', '/'
).replace(
'.py.html', '.py'
).replace(
'APP_NAME', app_name
).replace(
'MODEL_NAME', model_name
)
# Create the directory if it does not exist.
directory = os.path.dirname(destination)
if not os.path.exists(directory):
os.makedirs(directory)
return destination
def generate(self):
for template_name in self.template_names:
template = get_template(template_name)
data = template.render(self.context)
destination = self.get_destination(template_name, app_name=self.context['app_name'])
with open(destination, 'wb') as out:
out.write(data.encode('utf-8'))
logger.info(u"Write %s", destination)
class SingleFileGenerator(BaseGenerator):
"""SingeFileGenerator uses the complete context (all models) per template."""
template_names = [
'scaffold/admin.py.html',
'scaffold/context_processors.py.html',
'scaffold/model_mixins.py.html',
'scaffold/static/APP_NAME/styles.css',
'scaffold/templates/APP_NAME/index.html',
'scaffold/templates/APP_NAME/pagination.html',
'scaffold/templates/base.html',
'scaffold/templatetags/__init__.py',
'scaffold/templatetags/APP_NAME_tags.py',
'scaffold/urls.py.html',
'scaffold/views.py.html',
]
class MultiFileGenerator(BaseGenerator):
"""MultiFileGenerator splits the context into a context for each model. It generates multiple files per model."""
template_names = [
'scaffold/templates/APP_NAME/MODEL_NAME_base.html',
'scaffold/templates/APP_NAME/MODEL_NAME_confirm_delete.html',
'scaffold/templates/APP_NAME/MODEL_NAME_detail.html',
'scaffold/templates/APP_NAME/MODEL_NAME_form.html',
'scaffold/templates/APP_NAME/MODEL_NAME_list.html',
'scaffold/templates/APP_NAME/MODEL_NAME_table_detail.html',
'scaffold/templates/APP_NAME/MODEL_NAME_table_list.html',
]
def generate(self):
for obj in self.context['items']:
date_template_names = []
if obj['date_fields']:
date_template_names = [
'scaffold/templates/APP_NAME/MODEL_NAME_archive.html',
'scaffold/templates/APP_NAME/MODEL_NAME_archive_day.html',
'scaffold/templates/APP_NAME/MODEL_NAME_archive_month.html',
'scaffold/templates/APP_NAME/MODEL_NAME_archive_week.html',
'scaffold/templates/APP_NAME/MODEL_NAME_archive_year.html',
]
for template_name in self.template_names + date_template_names:
template = get_template(template_name)
data = template.render(obj)
destination = self.get_destination(template_name, obj['app_name'], obj['url_name'])
with open(destination, 'w') as out:
out.write(data)
logger.debug("Write %s", destination)
class Command(BaseCommand):
"""The handle method is executed by the `./manage.py build app_name` command.
Introspect all models in the given app and call generators.
The get fields methods are loosely based on:
https://docs.djangoproject.com/en/1.10/ref/models/meta/
"""
def add_arguments(self, parser):
parser.add_argument('app_name', nargs='+', type=str)
def get_fields(self, model):
"""All model fields, fields dynamically added from the other end excluded.
`include_hidden` is False by default. If set to True, get_fields() will include fields that are used to
back other field’s functionality. This will also include any fields that have a related_name (such as
ManyToManyField, or ForeignKey) that start with a `+`."""
return [field.name for field in model._meta.get_fields(include_hidden=False)]
def get_concrete_fields(self, model):
"""All model fields, like get_fields but NO backward related fields."""
fields = [
(f, f.model if f.model != model else None)
for f in model._meta.get_fields()
if f.concrete and (
not f.is_relation
or f.one_to_one
or (f.many_to_one and f.related_model)
)
]
return [field.name for field, model in fields]
def get_related_fields(self, model):
"""Related fields like ForeignKey, OneToOne fields."""
return [
field.name
for field in model._meta.get_fields()
if (field.one_to_many or field.one_to_one)
and field.auto_created and not field.concrete
]
def get_many_to_many_fields(self, model):
"""ManyToMany fields"""
return [
field.name
for field in model._meta.get_fields()
if field.many_to_many and not field.auto_created
]
def get_date_fields(self, model):
"""Date or datetime fields"""
return [
field.name for field in model._meta.get_fields()
if field.__class__ in (models.DateField, models.DateTimeField)
]
def get_text_fields(self, model):
"""Text fields"""
return [
field.name for field in model._meta.get_fields()
if field.__class__ in (models.CharField, models.TextField)
]
def get_related_with_models(self, model):
fields = [
(f.related_model.__name__, f.model if f.model != model else None)
for f in model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
]
return list(set([model_name for model_name, _ in fields]))
def handle(self, *args, **options):
"""Handle the command"""
# Raise error if app is not in INSTALLED_APPS.
app_name = options['app_name'][0]
if app_name not in settings.INSTALLED_APPS:
raise CommandError('Add {} to installed apps'.format(app_name))
# Build one big context of all models and their fields.
context = {'items': [], 'app_name': app_name}
all_models = apps.all_models[app_name]
for name, model in all_models.items():
if "_" not in name: # Django auto generated cross tables do have `_`. Exclude them.
context['items'].append({
'app_name': app_name,
'model': model,
'model_name': model.__name__,
'url_name': slugify(model._meta.verbose_name).replace('-', ''),
'model_slug': slugify(model._meta.verbose_name).replace('-', ''),
'verbose_name': model._meta.verbose_name,
                    'verbose_plural': model._meta.verbose_name_plural,
'table_name': model._meta.db_table,
'slug': slugify(model._meta.verbose_name),
                    'slug_plural': slugify(model._meta.verbose_name_plural),
'fields': self.get_fields(model),
'concrete_fields': self.get_concrete_fields(model),
'related_fields': self.get_related_fields(model),
'many_to_many_fields': self.get_many_to_many_fields(model),
'date_fields': self.get_date_fields(model),
'text_fields': self.get_text_fields(model),
'releated_with_models': self.get_related_with_models(model),
})
logger.info(context)
print(context)
path = apps.app_configs[app_name].path
for generator in [
SingleFileGenerator,
MultiFileGenerator,
]:
generator(context=context, path=path).generate()
logger.info('Success!')
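# Usage sketch (grounded in the Command docstring above; the app name is a
# placeholder): with "myapp" listed in INSTALLED_APPS, run
#   ./manage.py build myapp
# SingleFileGenerator then renders one output per template from the full
# context, while MultiFileGenerator renders one set of templates per model.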
|
allcaps/django-scaffold
|
scaffold/management/commands/build.py
|
Python
|
mit
| 8,615
|
import urllib.request
import pickle
import sys
import ast
try:
import variables as v
except:
class var():
def __init__(self):
self.screen = None
v = var()
import pygame as py
class textLabel(py.sprite.Sprite):
def __init__(self, text, pos, colour, font, size, variable = False, centred = False):
super().__init__()
self.text = text
self.pos = pos
self.colour = colour
self.font = font
self.size = size
self.variable = variable
self.centred = centred
def update(self):
pos = self.pos
font = py.font.Font(self.font, self.size)
if not self.variable:
label = font.render(self.text, 1, self.colour)
if self.variable:
label = font.render(str(getattr(v, self.text)), 1, self.colour)
if self.centred:
pos = list(self.pos)
pos[0] -= font.size(self.text)[0] / 2
pos[1] -= font.size(self.text)[1] / 2
pos = tuple(pos)
v.screen.blit(label, pos)
class Button(py.sprite.Sprite):
def __init__(self, text, pos, size, hovercolour, normalcolour, font, ID, centred = False, bsize=(0,0)):
super().__init__()
self.ID = ID
self.hovered = False
self.text = text
self.pos = pos
self.hcolour = hovercolour
self.ncolour = normalcolour
self.font = font
self.font = py.font.Font(font, int(size))
self.centred = centred
self.size = bsize
self.set_rect()
def update(self):
self.set_rend()
py.draw.rect(v.screen, self.get_color(), self.rect)
v.screen.blit(self.rend, self.rect)
if self.rect.collidepoint(py.mouse.get_pos()):
self.hovered = True
else:
self.hovered = False
def set_rend(self):
self.rend = self.font.render(self.text, True, (0,0,0))
def get_color(self):
if self.hovered:
return self.hcolour
else:
return self.ncolour
def set_rect(self):
self.set_rend()
self.rect = self.rend.get_rect()
if not self.centred:
self.rect.topleft = self.pos
if self.centred:
self.rect.center = self.pos
if not self.size[0] == 0:
self.rect.width = self.size[0]
if not self.size[1] == 0:
self.rect.height = self.size[1]
def pressed(self):
mouse = py.mouse.get_pos()
if mouse[0] > self.rect.topleft[0]:
if mouse[1] > self.rect.topleft[1]:
if mouse[0] < self.rect.bottomright[0]:
if mouse[1] < self.rect.bottomright[1]:
return True
else: return False
else: return False
else: return False
else: return False
import os, shutil
theFont = None
py.init()
v.screen = py.display.set_mode((640, 480))
v.screen.fill((20, 20, 20))
textLabel("Checking For Updates...", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
tries = 0
def reporthook(count, blockSize, totalSize):
if totalSize == -1:
print("FAILED TOTALSIZE")
raise Exception()
#Shows percentage of download
py.event.pump()
for event in py.event.get():
if event.type == py.QUIT:
sys.exit()
percent = int(count*blockSize*100/totalSize)
rect = py.Rect(100, 240, percent*4.4, 30)
v.screen.fill((20, 20, 20))
py.draw.rect(v.screen, (255, 0, 0), rect)
py.draw.rect(v.screen, (0, 0, 0), rect, 2)
py.draw.rect(v.screen, (0, 0, 0), (100, 240, 440, 30), 2)
textLabel("Downloading...", (320, 150), (255, 255, 255), theFont, 50, False, True).update()
textLabel(str(percent) + "%", (320, 255), (255, 255, 255), theFont, 20, False, True).update()
py.display.flip()
def recursive_overwrite(src, dest, ignore=None):
if os.path.isdir(src):
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(src)
if ignore is not None:
ignored = ignore(src, files)
else:
ignored = set()
for f in files:
if f not in ignored:
recursive_overwrite(os.path.join(src, f),
os.path.join(dest, f),
ignore)
else:
shutil.copyfile(src, dest)
def updateCheck():
global latest
page = urllib.request.urlopen('https://api.github.com/repos/lightopa/aiopa-battles/git/refs/heads/master')
#print(page.read().decode("utf-8"))
#data = json.loads(page.read().decode("utf-8"))
data = ast.literal_eval(page.read().decode("utf-8"))
latest = data["object"]["sha"]
#ind = page.find('class="sha btn btn-outline"')
#latest = page[ind + 38:ind + 45]
#print(latest)
#CHECK IF LATEST IS PROPER
try:
f = open("Update/current.version", "rb")
current = pickle.load(f)
f.close()
except:
print("create new file")
try:
os.mkdir("Update")
except:
pass
f = open("Update/current.version", "wb")
current = 0000
pickle.dump(current, f)
f.close()
print(current, "vs", latest)
if current != latest:
from os import remove
try:
remove("Update/download.zip")
except:
pass
print("downloading latest")
buttons = py.sprite.Group()
buttons.add(Button("Update", (220, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "Y", centred=True))
buttons.add(Button("Ignore", (420, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "N", centred=True))
buttons.add(Button("Skip Update", (320, 300), 40, (100, 100, 100), (255, 255, 255), theFont, "S", centred=True))
labels = py.sprite.Group()
labels.add(textLabel("An Update Is Available:", (320, 150), (255, 255, 255), theFont, 50, False, True))
labels.add(textLabel(str(str(current) + " ==> " + str(latest)), (320, 180), (255, 255, 255), theFont, 20, False, True))
while True:
py.event.pump()
v.screen.fill((20, 20, 20))
buttons.update()
labels.update()
for event in py.event.get():
if event.type == py.QUIT:
sys.exit()
elif event.type == py.MOUSEBUTTONDOWN:
for button in buttons:
if button.pressed():
id = button.ID
if id == "Y":
global tries
tries = 0
download()
return
if id == "N":
return
if id == "S":
f = open("Saves/current.version", "wb")
current = latest
pickle.dump(current, f)
f.close()
return
py.display.flip()
else:
v.screen.fill((20, 20, 20))
t = textLabel("No Update!", (320, 250), (255, 0, 0), theFont, 70, False, True)
v.current = current
t.update()
py.display.update()
if __name__ == "__main__":
py.time.wait(2000)
def download():
global tries
try:
try:
os.mkdir("Update")
except:
pass
urllib.request.urlretrieve("https://github.com/lightopa/Aiopa-Battles/archive/master.zip", "Update/download.zip", reporthook)
f = open("Update/current.version", "wb")
current = latest
pickle.dump(current, f)
f.close()
unzip()
except Exception as e:
tries += 1
print("Error: " + str(e))
v.screen.fill((20, 20, 20))
textLabel("Download Error. Retry " + str(tries) + "/8", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
textLabel("Error: " + str(e), (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
if tries > 8:
return
download()
def unzip():
v.screen.fill((20, 20, 20))
textLabel("Extracting Data...", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
import zipfile
with zipfile.ZipFile('Update/download.zip', "r") as z:
z.extractall("Update/")
v.screen.fill((20, 20, 20))
textLabel("Updating Files...", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
from os import getcwd
recursive_overwrite("Update/Aiopa-Battles-master", getcwd())
if __name__ == "__main__":
updateCheck()
|
lightopa/Aiopa-Battles
|
updater.py
|
Python
|
mit
| 9,215
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript
from io import BytesIO
import time
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
'''
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
'''
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
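# Plain-bytes sketch of the padding above (the signature is a fabricated
# minimal DER blob for illustration, not a real one, hence commented out):
# sig = b"\x30\x06\x02\x01\x01\x02\x01\x01" + b"\x01"  # DER body + hashtype byte
# padded = sig[0:-1] + b"\x00" + sig[-1:]              # zero byte before hashtype
# assert padded[-2:] == b"\x00\x01"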
'''
This test is meant to exercise BIP66 (DER SIG).
Connect to a single node.
Mine 2 (version 2) blocks (save the coinbases for later).
Generate 98 more version 2 blocks, verify the node accepts.
Mine 749 version 3 blocks, verify the node accepts.
Check that the new DERSIG rules are not enforced on the 750th version 3 block.
Check that the new DERSIG rules are enforced on the 751st version 3 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP66Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(2)
height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
''' 98 more version 2 blocks '''
test_blocks = []
for i in range(98):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 3 blocks '''
test_blocks = []
for i in range(749):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
'''
Check that the new DERSIG rules are not enforced in the 750th
version 3 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Check that the new DERSIG rules are enforced in the 751st version 3
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in range(199):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP66Test().main()
|
presidentielcoin/presidentielcoin
|
qa/rpc-tests/bipdersig-p2p.py
|
Python
|
mit
| 6,875
|
from yapsy.IPlugin import IPlugin
from manager.message import Message
import protocol.control as control
import schedule
class Time(IPlugin, Message):
def __init__(self):
IPlugin.__init__(self)
Message.__init__(self)
self.text = control.SPEED_1 + '\x0B9 \x0B8' + control.NEW_LINE + control.CALL_TIME
self.schedule = schedule.every(3).hours
|
ctmyers/sign
|
plugins/time/time.py
|
Python
|
mit
| 380
|
from django.db import models
class Births(models.Model):
year = models.IntegerField("Year")
county = models.CharField("County",max_length=20)
mothersAge = models.IntegerField("Mother's Age")
mothersEdu = models.CharField("Mother's Education",max_length=50)
source = models.URLField("Source")
isRepeat = models.BooleanField("Is a Repeat Birth")
births = models.IntegerField("Births")
def get_fields(self):
fields = []
for f in self._meta.fields:
fields.append(f.name)
return fields
def get_names(self):
names = []
for f in self._meta.fields:
names.append(self._meta.get_field(f.name).verbose_name.title())
return names
def __unicode__(self):
s = "In " + self.county + " county, " + str(self.year)
s += ", there were " + str(self.births)
if self.isRepeat: s += " repeat births to "
else: s += " first births to "
s += str(self.mothersAge) + "-year-old mothers who "
s += self.mothersEdu + ", according to " + self.source
return s
class Diseases(models.Model):
year = models.IntegerField("Year")
county = models.CharField("County",max_length=20)
topic = models.CharField("Topic",max_length=50)
# Topics:
# HIV Cases
# AIDS Cases
# HIV+AIDS Deaths
# HIV+AIDS Deaths Age-Adjusted
source = models.URLField("Source")
count = models.IntegerField("Count")
rate = models.FloatField("Rate")
def __unicode__(self):
s = "In " + self.county + " county, " + str(self.year)
s += ", there were " + str(self.count) + " "
s += self.topic + " (or " + str(self.rate)
s += "%), according to " + self.source
return s
class Upload(models.Model):
upfile = models.FileField(upload_to='Updates Go Here')
|
DalenWBrauner/FloridaDataOverlay
|
Website/Florida_Data_Overlay/Overlay/models.py
|
Python
|
mit
| 1,899
|
# Various utilies for dealing with Neutron and the renaming from Quantum.
from subprocess import check_output
from charmhelpers.core.hookenv import (
config,
log,
ERROR,
)
from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have support for GRE and VXLAN native
if kernel_version() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
# legacy
def quantum_plugins():
from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
},
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-vmware'],
'server_services': ['neutron-server']
},
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
return plugins
def neutron_plugin_attribute(plugin, attr, net_manager=None):
manager = net_manager or network_manager()
if manager == 'quantum':
plugins = quantum_plugins()
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log('Error: Network manager does not support plugins.')
raise Exception
try:
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise Exception
try:
return _plugin[attr]
except KeyError:
return None
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (e.g., deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')
manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'
else:
# ensure accurate naming for all releases post-H
return 'neutron'
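# Usage sketch (only resolves inside a deployed charm where config() and
# os_release() are backed by real charm data, hence commented out):
# pkgs = neutron_plugin_attribute('ovs', 'packages', net_manager='neutron')
# for group in pkgs:
#     print(group)  # e.g. a headers/DKMS group and the agent package group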
|
jiasir/openstack-trove
|
lib/charmhelpers/contrib/openstack/neutron.py
|
Python
|
mit
| 7,812
|
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
"""
This TinyMCE widget was copied and extended from this code by John D'Agostino:
http://code.djangoproject.com/wiki/CustomWidgetsTinyMCE
"""
from django import forms
from django.conf import settings
from django.contrib.admin import widgets as admin_widgets
from django.core.urlresolvers import reverse
from django.forms.widgets import flatatt
from django.utils.encoding import smart_unicode
from django.utils.html import escape
from django.utils import simplejson
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext as _
import tinymce.settings
class TinyMCE(forms.Textarea):
"""
TinyMCE widget. Set settings.TINYMCE_JS_URL to set the location of the
javascript file. Default is "MEDIA_URL + 'js/tiny_mce/tiny_mce.js'".
You can customize the configuration with the mce_attrs argument to the
constructor.
In addition to the standard configuration you can set the
'content_language' parameter. It takes the value of the 'language'
parameter by default.
In addition to the default settings from settings.TINYMCE_DEFAULT_CONFIG,
this widget sets the 'language', 'directionality' and
'spellchecker_languages' parameters by default. The first is derived from
the current Django language, the others from the 'content_language'
parameter.
"""
def __init__(self, content_language=None, attrs=None, mce_attrs={}):
super(TinyMCE, self).__init__(attrs)
self.mce_attrs = mce_attrs
if content_language is None:
content_language = mce_attrs.get('language', None)
self.content_language = content_language
def render(self, name, value, attrs=None):
if value is None: value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, "TinyMCE widget attributes must contain 'id'"
mce_config = tinymce.settings.DEFAULT_CONFIG.copy()
mce_config.update(get_language_config(self.content_language))
if tinymce.settings.USE_FILEBROWSER:
mce_config['file_browser_callback'] = "djangoFileBrowser"
mce_config.update(self.mce_attrs)
mce_config['mode'] = 'exact'
mce_config['elements'] = final_attrs['id']
mce_config['strict_loading_mode'] = 1
mce_json = simplejson.dumps(mce_config)
html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))]
if tinymce.settings.USE_COMPRESSOR:
compressor_config = {
'plugins': mce_config.get('plugins', ''),
'themes': mce_config.get('theme', 'advanced'),
'languages': mce_config.get('language', ''),
'diskcache': True,
'debug': False,
}
compressor_json = simplejson.dumps(compressor_config)
html.append(u'<script type="text/javascript">tinyMCE_GZ.init(%s)</script>' % compressor_json)
html.append(u'<script type="text/javascript">tinyMCE.init(%s)</script>' % mce_json)
return mark_safe(u'\n'.join(html))
def _media(self):
if tinymce.settings.USE_COMPRESSOR:
js = [reverse('tinymce-compressor')]
else:
js = [tinymce.settings.JS_URL]
if tinymce.settings.USE_FILEBROWSER:
js.append(reverse('tinymce-filebrowser'))
return forms.Media(js=js)
media = property(_media)
class AdminTinyMCE(admin_widgets.AdminTextareaWidget, TinyMCE):
pass
def get_language_config(content_language=None):
language = get_language()[:2]
if content_language:
content_language = content_language[:2]
else:
content_language = language
config = {}
config['language'] = language
lang_names = SortedDict()
for lang, name in settings.LANGUAGES:
if lang[:2] not in lang_names: lang_names[lang[:2]] = []
lang_names[lang[:2]].append(_(name))
sp_langs = []
for lang, names in lang_names.items():
if lang == content_language:
default = '+'
else:
default = ''
sp_langs.append(u'%s%s=%s' % (default, ' / '.join(names), lang))
config['spellchecker_languages'] = ','.join(sp_langs)
if content_language in settings.LANGUAGES_BIDI:
config['directionality'] = 'rtl'
else:
config['directionality'] = 'ltr'
if tinymce.settings.USE_SPELLCHECKER:
config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')
return config
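# Usage sketch (assumes django-tinymce is installed and configured; the form
# and field names are placeholders, hence commented out):
# from django import forms as dj_forms
# from tinymce.widgets import TinyMCE
#
# class ArticleForm(dj_forms.Form):
#     body = dj_forms.CharField(widget=TinyMCE(mce_attrs={'theme': 'advanced'}))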
|
RaD/django-tinymce
|
tinymce/widgets.py
|
Python
|
mit
| 4,724
|
from canvas_sdk import client, utils
def make_account_admin(request_ctx, account_id, user_id, role=None, role_id=None, send_confirmation=None, **request_kwargs):
"""
Flag an existing user as an admin within the account.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param user_id: (required) The id of the user to promote.
:type user_id: integer
:param role: (optional) (deprecated)
The user's admin relationship with the account will be created with the
given role. Defaults to 'AccountAdmin'.
:type role: string or None
:param role_id: (optional) The user's admin relationship with the account will be created with the
given role. Defaults to the built-in role for 'AccountAdmin'.
:type role_id: integer or None
:param send_confirmation: (optional) Send a notification email to
the new admin if true. Default is true.
:type send_confirmation: boolean or None
:return: Make an account admin
:rtype: requests.Response (with Admin data)
"""
path = '/v1/accounts/{account_id}/admins'
payload = {
'user_id' : user_id,
'role' : role,
'role_id' : role_id,
'send_confirmation' : send_confirmation,
}
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.post(request_ctx, url, payload=payload, **request_kwargs)
return response
def remove_account_admin(request_ctx, account_id, user_id, role=None, role_id=None, **request_kwargs):
"""
Remove the rights associated with an account admin role from a user.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param user_id: (required) ID
:type user_id: string
:param role: (optional) (Deprecated)
Account role to remove from the user. Defaults to 'AccountAdmin'. Any
other account role must be specified explicitly.
:type role: string or None
:param role_id: (optional) The user's admin relationship with the account will be created with the
given role. Defaults to the built-in role for 'AccountAdmin'.
:type role_id: integer or None
:return: Remove account admin
:rtype: requests.Response (with Admin data)
"""
path = '/v1/accounts/{account_id}/admins/{user_id}'
payload = {
'role' : role,
'role_id' : role_id,
}
url = request_ctx.base_api_url + path.format(account_id=account_id, user_id=user_id)
response = client.delete(request_ctx, url, payload=payload, **request_kwargs)
return response
def list_account_admins(request_ctx, account_id, user_id=None, per_page=None, **request_kwargs):
"""
List the admins in the account
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:param user_id: (optional) Scope the results to those with user IDs equal to any of the IDs specified here.
:type user_id: array or None
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List account admins
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/accounts/{account_id}/admins'
payload = {
'user_id' : user_id,
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
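# Illustrative usage sketch (not part of the original module; the ids are
# placeholders and request_ctx is assumed to be a configured RequestContext):
#   from canvas_sdk.methods.admins import make_account_admin, list_account_admins
#   response = make_account_admin(request_ctx, account_id='1', user_id=42,
#                                 send_confirmation=False)
#   admins = list_account_admins(request_ctx, account_id='1').json()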
|
penzance/canvas_python_sdk
|
canvas_sdk/methods/admins.py
|
Python
|
mit
| 3,874
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import setuptools
from setuptools.command.develop import develop
from setuptools.command.install import install
class DevelopScript(develop):
def run(self):
develop.run(self)
        nltk_install_packages()
class InstallScript(install):
def run(self):
install.run(self)
        nltk_install_packages()
def nltk_install_packages():
import nltk
print("Downloading nltk packages...")
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
setuptools.setup(
name="zeeguu_core",
version="0.1",
packages=setuptools.find_packages(),
include_package_data=True,
zip_safe=False,
author="Zeeguu Team",
author_email="me@mir.lu",
description="Core for Zeeguu",
keywords="second language acquisition api",
cmdclass={
'develop': DevelopScript,
'install': InstallScript,
},
)
|
mircealungu/Zeeguu-Core
|
setup.py
|
Python
|
mit
| 923
|
import os, sys; sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import pytest
from lasio import read
test_dir = os.path.dirname(__file__)
egfn = lambda fn: os.path.join(os.path.dirname(__file__), "examples", fn)
def test_open_url():
l = read("https://raw.githubusercontent.com/kinverarity1/"
"lasio/master/standards/examples"
"/1.2/sample_curve_api.las")
def test_open_file_object():
with open(egfn("sample.las"), mode="r") as f:
l = read(f)
def test_open_filename():
l = read(egfn("sample.las"))
def test_open_incorrect_filename():
with pytest.raises(OSError):
l = read(egfn("sampleXXXDOES NOT EXIST.las"))
def test_open_string():
l = read("""~VERSION INFORMATION
VERS. 1.2: CWLS LOG ASCII STANDARD -VERSION 1.2
WRAP. NO: ONE LINE PER DEPTH STEP
~WELL INFORMATION BLOCK
#MNEM.UNIT DATA TYPE INFORMATION
#--------- ------------- ------------------------------
STRT.M 1670.000000:
STOP.M 1660.000000:
STEP.M -0.1250:
NULL. -999.2500:
COMP. COMPANY: # ANY OIL COMPANY LTD.
WELL. WELL: ANY ET AL OIL WELL #12
FLD . FIELD: EDAM
LOC . LOCATION: A9-16-49-20W3M
PROV. PROVINCE: SASKATCHEWAN
SRVC. SERVICE COMPANY: ANY LOGGING COMPANY LTD.
DATE. LOG DATE: 25-DEC-1988
UWI . UNIQUE WELL ID: 100091604920W300
~CURVE INFORMATION
#MNEM.UNIT API CODE CURVE DESCRIPTION
#--------- ------------- ------------------------------
DEPT.M : 1 DEPTH
DT .US/M : 2 SONIC TRANSIT TIME
RHOB.K/M3 : 3 BULK DENSITY
NPHI.V/V : 4 NEUTRON POROSITY
SFLU.OHMM : 5 RXO RESISTIVITY
SFLA.OHMM : 6 SHALLOW RESISTIVITY
ILM .OHMM : 7 MEDIUM RESISTIVITY
ILD .OHMM : 8 DEEP RESISTIVITY
~PARAMETER INFORMATION
#MNEM.UNIT VALUE DESCRIPTION
#--------- ------------- ------------------------------
BHT .DEGC 35.5000: BOTTOM HOLE TEMPERATURE
BS .MM 200.0000: BIT SIZE
FD .K/M3 1000.0000: FLUID DENSITY
MATR. 0.0000: NEUTRON MATRIX(0=LIME,1=SAND,2=DOLO)
MDEN. 2710.0000: LOGGING MATRIX DENSITY
RMF .OHMM 0.2160: MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0000: DRILL FLUID DENSITY
~Other
Note: The logging tools became stuck at 625 meters causing the data
between 625 meters and 615 meters to be invalid.
~A DEPTH DT RHOB NPHI SFLU SFLA ILM ILD
1670.000 123.450 2550.000 0.450 123.450 123.450 110.200 105.600
1669.875 123.450 2550.000 0.450 123.450 123.450 110.200 105.600
1669.750 123.450 2550.000 0.450 123.450 123.450 110.200 105.600
""")
|
Kramer477/lasio
|
tests/test_open_file.py
|
Python
|
mit
| 2,943
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('taskmanager', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('name', models.CharField(verbose_name='name', max_length=100, help_text='Enter the project name')),
('color', models.CharField(verbose_name='color', validators=[django.core.validators.RegexValidator('(^#[0-9a-fA-F]{3}$)|(^#[0-9a-fA-F]{6}$)')], default='#fff', max_length=7, help_text='Enter the hex color code, like #ccc or #cccccc')),
                ('user', models.ForeignKey(verbose_name='user', related_name='projects', to='taskmanager.Profile')),
],
options={
'ordering': ('user', 'name'),
'verbose_name': 'Project',
'verbose_name_plural': 'Projects',
},
),
migrations.AlterUniqueTogether(
name='project',
unique_together=set([('user', 'name')]),
),
]
|
memnonila/taskbuster
|
taskbuster/apps/taskmanager/migrations/0002_auto_20150708_1158.py
|
Python
|
mit
| 1,290
|
###################################
## SPADE formant analysis script ##
###################################
## Processes and extracts 'static' (single point) formant values, along with linguistic
## and acoustic information from corpora collected as part of the SPeech Across Dialects
## of English (SPADE) project.
## Input:
## - corpus name (e.g., Buckeye, SOTC)
## - corpus metadata (stored in a YAML file)
## this file should specify the path to the
## audio, transcripts, metadata files (e.g.,
## speaker, lexicon), and a data file containing
## prototype formant values to be used for formant
## estimation
## Output:
## - CSV of single-point vowel measurements (1 row per token),
## with columns for the linguistic, acoustic, and speaker information
## associated with that token
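##
## Illustrative invocation (not part of the original header; the corpus name
## and flags correspond to the argparse setup below):
##   python formant.py Buckeye --reset --formant_reset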
import sys
import os
import argparse
base_dir = os.path.dirname(os.path.abspath(__file__))
script_dir = os.path.join(base_dir, 'Common')
sys.path.insert(0, script_dir)
drop_formant = True
import common
from polyglotdb.utils import ensure_local_database_running
from polyglotdb import CorpusConfig
## Define and process command line arguments
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('corpus_name', help='Name of the corpus')
parser.add_argument('-r', '--reset', help="Reset the corpus", action='store_true')
parser.add_argument('-f', '--formant_reset', help="Reset formant measures", action = 'store_true', default=False)
parser.add_argument('-d', '--docker', help="This script is being called from Docker", action='store_true')
args = parser.parse_args()
corpus_name = args.corpus_name
reset = args.reset
docker = args.docker
reset_formants = args.formant_reset
    directories = [x for x in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, x)) and x != 'Common']
## sanity-check the corpus name (i.e., that it directs to a YAML file)
if args.corpus_name not in directories:
print(
'The corpus {0} does not have a directory (available: {1}). Please make it with a {0}.yaml file inside.'.format(
args.corpus_name, ', '.join(directories)))
sys.exit(1)
corpus_conf = common.load_config(corpus_name)
print('Processing...')
## apply corpus reset or docker application
## if flags are used
if reset:
common.reset(corpus_name)
ip = common.server_ip
if docker:
ip = common.docker_ip
with ensure_local_database_running(corpus_name, port=common.server_port, ip=ip, token=common.load_token()) as params:
print(params)
config = CorpusConfig(corpus_name, **params)
config.formant_source = 'praat'
        ## Common set up: see common.py for details of these functions ##
## Check if the corpus already has an associated graph object; if not,
## perform importing and parsing of the corpus files
common.loading(config, corpus_conf['corpus_directory'], corpus_conf['input_format'])
## Perform linguistic, speaker, and acoustic enrichment
common.lexicon_enrichment(config, corpus_conf['unisyn_spade_directory'], corpus_conf['dialect_code'])
common.speaker_enrichment(config, corpus_conf['speaker_enrichment_file'])
common.basic_enrichment(config, corpus_conf['vowel_inventory'] + corpus_conf['extra_syllabic_segments'], corpus_conf['pauses'])
        ## Check if the YAML specifies the path to the vowel prototypes file
## if not, load the prototypes file from the default location
## (within the SPADE corpus directory)
vowel_prototypes_path = corpus_conf.get('vowel_prototypes_path','')
if not vowel_prototypes_path:
vowel_prototypes_path = os.path.join(base_dir, corpus_name, '{}_prototypes.csv'.format(corpus_name))
## Determine the class of phone labels to be used for formant analysis
## based on lists provided in the YAML file.
if corpus_conf['stressed_vowels']:
vowels_to_analyze = corpus_conf['stressed_vowels']
else:
vowels_to_analyze = corpus_conf['vowel_inventory']
## Perform formant estimation and analysis
## see common.py for the details of this implementation
common.formant_acoustic_analysis(config, vowels_to_analyze, vowel_prototypes_path, drop_formant=drop_formant, reset_formants=reset_formants)
## Output the query (determined in common.py) as a CSV file
common.formant_export(config, corpus_name, corpus_conf['dialect_code'],
corpus_conf['speakers'], vowels_to_analyze, output_tracks=False)
print('Finishing up!')
|
MontrealCorpusTools/SPADE
|
formant.py
|
Python
|
mit
| 4,664
|
from .base import ItemCollector
class BaseProbabilityCollector(ItemCollector):
# result_dependencies = (*CountCollector, *FrequencyCollector)
def __init__(self, previous_collector_set):
super().__init__(previous_collector_set)
self.__cached_result = None
def get_result(self, collector_set):
if self.__cached_result is None:
self.__cached_result = \
collector_set[self.result_dependencies[1]].get_result(collector_set) \
.normalize(collector_set[self.result_dependencies[0]] \
.get_result(collector_set))
return self.__cached_result
def as_str(self, collector_set, number_fmt=''):
return format(self.get_result(collector_set), number_fmt)
@staticmethod
def result_norm(a, b):
return a.distance_to(b)
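# Hypothetical subclass sketch (collector names invented for illustration):
# a concrete probability collector only declares its two result_dependencies,
# ordered (count, frequency) to match the indexing in get_result above.
#   class ColumnProbabilityCollector(BaseProbabilityCollector):
#       result_dependencies = (ColumnCountCollector, ColumnFrequencyCollector)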
|
davidfoerster/schema-matching
|
src/schema_matching/collector/probability.py
|
Python
|
mit
| 748
|
import RPi.GPIO as GPIO
import time
led = 11
GPIO.setmode(GPIO.BOARD)
GPIO.setup(led,GPIO.OUT)
#for x in range(0,100):
# two slow blinks (0.5 s on/off), then three fast blinks (0.2 s on/off)
for _ in range(2):
    GPIO.output(led, True)
    time.sleep(0.5)
    GPIO.output(led, False)
    time.sleep(0.5)
for _ in range(3):
    GPIO.output(led, True)
    time.sleep(0.2)
    GPIO.output(led, False)
    time.sleep(0.2)
# time.sleep(1)
# GPIO.output(led,False)
# time.sleep(1)
#GPIO.cleanup()
#GPIO.output(led,False)
|
OuHangKresnik/Ninja
|
raspberry/ledplay/ledstart.py
|
Python
|
mit
| 612
|
import os
from HTMLParser import HTMLParser
from ConfigParser import NoOptionError
import nltk
from sentence import Sentence
from resources import Resources
from wordnet_cache import WordnetCache as Wordnet
class ReadAndEnrich(object):
def __init__(self, conf):
self.conf = conf
self.enricher = Enricher(conf)
self.pairs = []
def read_sentences(self, stream):
for sen1, sen2, tags1, tags2 in self.read_lines(stream):
s1 = self.enricher.add_sentence(sen1, tags1)
s2 = self.enricher.add_sentence(sen2, tags2)
self.pairs.append((s1, s2))
return self.pairs
def clear_pairs(self):
self.pairs = []
def read_lines(self, stream):
enc = self.conf.get('global', 'encoding')
for l in stream:
fs = l.decode(enc).strip().split('\t')
if len(fs) == 2:
sen1 = fs[0]
sen2 = fs[1]
yield sen1, sen2, None, None
elif len(fs) == 6:
sen1 = fs[2]
sen2 = fs[3]
tags1 = fs[4]
tags2 = fs[5]
yield sen1, sen2, tags1, tags2
elif len(fs) == 7:
sen1 = fs[2]
sen2 = fs[3]
tags1 = fs[5]
tags2 = fs[6]
yield sen1, sen2, tags1, tags2
class Enricher(object):
def __init__(self, conf):
self.conf = conf
self.sentences = {}
if self.conf.get('global', 'tokenizer') == 'sts':
self.html_parser = HTMLParser()
self.hunpos = self.init_hunpos()
def init_hunpos(self):
hunpos_dir = self.conf.get('global', 'hunpos_dir')
hunpos_binary = os.path.join(hunpos_dir, 'hunpos-tag')
hunpos_model = os.path.join(hunpos_dir, 'en_wsj.model')
return nltk.tag.HunposTagger(hunpos_model, hunpos_binary)
def add_sentence(self, sentence, tags):
        if hash(sentence) not in self.sentences:
tokens = self.tokenize_and_tag(sentence, tags)
# filter tokens if the config option remove_stopwords
# and/or remove_punctuation is set
filt_tokens = self.filter_tokens(tokens)
self.add_wordnet_senses(filt_tokens)
s = Sentence(sentence, filt_tokens)
            self.sentences[hash(sentence)] = s
return self.sentences[hash(sentence)]
def add_wordnet_senses(self, tokens):
for token in tokens:
if self.conf.getboolean('wordnet', 'enrich_with_senses'):
token['senses'] = Wordnet.get_senses(token['token'], self.conf.getint('wordnet', 'sense_threshold'))
else:
token['senses'] = set([token['token']])
def filter_tokens(self, tokens):
new_tok = []
for token in tokens:
word = token['token']
if self.conf.getboolean('global', 'remove_stopwords') and word in Resources.stopwords:
continue
if self.conf.getboolean('global', 'remove_punctuation') and word in Resources.punctuation:
continue
if self.conf.getboolean('global', 'filter_frequent_adverbs') and Resources.is_frequent_adverb(word, token['pos']):
continue
new_tok.append(token)
return new_tok
def tokenize_and_tag(self, sentence, tags):
tokens = [{'token': t} for t in self.tokenize(sentence)]
if tags:
self.parse_tags(tokens, tags)
else:
if self.conf.get('global', 'tokenizer') == 'sts':
self.tag_tokens(tokens)
else:
self.dummy_tag_tokens(tokens)
if self.conf.getboolean('global', 'lower'):
for t in tokens:
t['token'] = t['token'].lower()
return tokens
def tokenize(self, sentence):
tok_method = self.conf.get('global', 'tokenizer')
if tok_method == 'simple':
return sentence.split(' ')
if tok_method == 'sts':
return self.sts_tokenize(sentence)
def sts_tokenize(self, sentence):
tokens = nltk.word_tokenize(self.html_parser.unescape(sentence))
toks = []
for tok in tokens:
if tok in Resources.punctuation:
toks.append(tok)
else:
toks += Resources.punct_re.split(tok)
return filter(lambda x: x not in ('', 's'), toks)
def parse_tags(self, tokens, tags_str):
# match tags with tokens and skip tags if a token
# is missing (it was filtered by the tokenizer)
i = 0
for t in tags_str.split():
sp = t.split('/')
if not sp[0] == tokens[i]['token']:
continue
tokens[i]['ner'] = sp[1]
tokens[i]['pos'] = sp[2]
tokens[i]['chunk'] = sp[3]
i += 1
def dummy_tag_tokens(self, tokens):
for t in tokens:
t['pos'] = ''
t['ner'] = ''
t['chunk'] = ''
def tag_tokens(self, tokens):
words = [i['token'] for i in tokens]
pos_tags = self.hunpos.tag(words)
if self.conf.getboolean('penalty', 'penalize_named_entities'):
ne = nltk.ne_chunk(pos_tags)
ner_tags = self.get_ner_tags(ne)
else:
ner_tags = ['' for _ in range(len(tokens))]
for i, tag in enumerate(pos_tags):
tokens[i]['pos'] = tag
tokens[i]['ner'] = ner_tags[i]
def get_ner_tags(self, ne):
tags = []
for piece in ne:
if isinstance(piece, tuple):
tok, pos = piece
tags.append((pos, 'o'))
else:
ne_type = piece.label()
tags.append((piece[0][1], 'b-{0}'.format(ne_type)))
tags += [(tok[1], 'i-{0}'.format(ne_type)) for tok in piece[1:]]
return tags
|
juditacs/semeval
|
semeval/read_and_enrich.py
|
Python
|
mit
| 5,919
|
#! /usr/bin/env python
"""
couchbasekit.fields
~~~~~~~~~~~~~~~~~~~
:website: http://github.com/kirpit/couchbasekit
:copyright: Copyright 2013, Roy Enjoy <kirpit *at* gmail.com>, see AUTHORS.txt.
:license: MIT, see LICENSE.txt for details.
* :class:`couchbasekit.fields.CustomField`
* :class:`couchbasekit.fields.ChoiceField`
* :class:`couchbasekit.fields.EmailField`
* :class:`couchbasekit.fields.PasswordField`
"""
import re
from abc import ABCMeta
class CustomField(object):
"""The abstract custom field to be extended by all other field classes.
.. note::
You can also create your own custom field types by implementing this
class. All you have to do is to assign your final (that is calculated
and ready to be saved) value to the :attr:`value` property. Please
note that it should also accept unicode raw values, which are fetched
and returned from couchbase server. See :class:`PasswordField` source
code as an example.
Please contribute back if you create a generic and useful custom field.
"""
__metaclass__ = ABCMeta
_value = None
def __init__(self):
raise NotImplementedError()
def __repr__(self):
return repr(self.value)
def __eq__(self, other):
if type(other) is type(self) and other.value==self.value:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def value(self):
"""Property to be used when saving a custom field into
:class:`couchbasekit.document.Document` instance.
:returns: The value to be saved for the field within
:class:`couchbasekit.document.Document` instances.
:rtype: mixed
"""
if self._value is None:
raise ValueError("%s's 'value' is not set." % type(self).__name__)
return self._value
@value.setter
def value(self, value):
"""Propery setter that should be used to assign final (calculated)
value.
"""
self._value = value
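# Hypothetical minimal subclass sketch (invented for illustration, following
# the note in CustomField's docstring): assign the computed value to the
# `value` property; raw unicode fetched back from the server takes the same path.
#   class LowercaseField(CustomField):
#       def __init__(self, raw):
#           self.value = raw.lower()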
class ChoiceField(CustomField):
"""The custom field to be used for multi choice options such as gender,
static category list etc. This class can't be used directly that has to be
extended by your choice list class. Thankfully, it's just easy::
class Gender(ChoiceField):
CHOICES = {
'M': 'Male',
'F': 'Female',
}
and all you have to do is to pass the current value to create your choice
object:
>>> choice = Gender('F')
>>> choice.value
'F'
>>> choice.text
'Female'
:param choice: The choice value.
:type choice: basestring
"""
__metaclass__ = ABCMeta
CHOICES = {}
def __eq__(self, other):
if super(ChoiceField, self).__eq__(other) and other.CHOICES==self.CHOICES:
return True
return False
def __init__(self, choice):
if not isinstance(self.CHOICES, dict) or not len(self.CHOICES):
raise AttributeError("ChoiceFields must have dictionary 'CHOICES' "
"attribute and cannot be empty.")
if choice not in self.CHOICES:
raise ValueError("Default choice for %s must be "
"within the 'CHOICES' attribute."
% type(self).__name__)
self.value = choice
@property
def text(self):
"""Returns the text of the current choice, object property.
:rtype: unicode
"""
return self.CHOICES.get(self.value)
def iteritems(self):
return self.CHOICES.iteritems()
# stolen from django email validator:
EMAIL_RE = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
# quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"'
r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)$)' # domain
r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3)
class EmailField(CustomField):
"""The custom field to be used for email addresses and intended to validate
them as well.
:param email: Email address to be saved.
:type email: basestring
"""
def __init__(self, email):
if not self.is_valid(email):
raise ValueError("Email address is invalid.")
self.value = email
@staticmethod
def is_valid(email):
"""Email address validation method.
:param email: Email address to be saved.
:type email: basestring
:returns: True if email address is correct, False otherwise.
:rtype: bool
"""
if isinstance(email, basestring) and EMAIL_RE.match(email):
return True
return False
class PasswordField(CustomField):
"""The custom field to be used for password types.
It encrypts the raw passwords on-the-fly and depends on
`py-bcrypt` library for such encryption.
:param password: Raw or encrypted password value.
:type password: unicode
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
LOG_ROUNDS = 12
def __init__(self, password):
if not isinstance(password, basestring):
raise ValueError("Password must be a string or unicode.")
# do the encryption if raw password provided
if not password.startswith(('$2a$', '$2y$')):
bcrypt = self.get_bcrypt()
password = bcrypt.hashpw(password, bcrypt.gensalt(self.LOG_ROUNDS))
self.value = password
@staticmethod
def get_bcrypt():
"""Returns the `py-bcrypt` library for internal usage.
:returns: `py-bcrypt` package.
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
try: import bcrypt
except ImportError:
raise ImportError("PasswordField requires 'py-bcrypt' "
"library to hash the passwords.")
else: return bcrypt
def check_password(self, raw_password):
"""Validates the given raw password against the intance's encrypted one.
:param raw_password: Raw password to be checked against.
:type raw_password: unicode
:returns: True if comparison was successful, False otherwise.
:rtype: bool
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
bcrypt = self.get_bcrypt()
return bcrypt.hashpw(raw_password, self.value)==self.value
|
kirpit/couchbasekit
|
couchbasekit/fields.py
|
Python
|
mit
| 6,692
|
from ..base import HaravanResource
class ShippingLine(HaravanResource):
pass
|
Haravan/haravan_python_api
|
haravan/resources/shipping_line.py
|
Python
|
mit
| 83
|
from gscripts import qtools
import sys, os
if not os.path.exists("fastqc/"):
os.mkdir("fastqc")
cmds = []
Sub = qtools.Submitter()
for fileName in sys.argv[1:]:
fastqc_command = "fastqc -o fastqc %s" %fileName
cmds.append(fastqc_command)
Sub.job(command_list=cmds, sh_file="runFastqc.sh", job_name="Fastqc", array=True, queue="home", nodes=1, ppn=1, submit=True, max_running=1000)
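# Illustrative invocation (not part of the original script): the FASTQ files
# to check are passed as positional arguments.
#   python submit_fastqc.py sample1.fastq sample2.fastq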
|
YeoLab/gscripts
|
gscripts/general/submit_fastqc.py
|
Python
|
mit
| 403
|
# -*- coding: utf-8 -*-
import csv
import os
import gzip
class File:
def read(self, path, **kwargs):
path = os.path.join(kwargs.get('root_path', ''), path)
content_type = kwargs.get('content_type', 'txt')
if content_type == 'txt':
with file(path, 'r') as f:
content = f.read()
yield content
elif content_type == 'gz':
with gzip.open(path, 'r') as f:
content = f.read()
yield content
elif content_type == 'csv':
with open(path, 'rU') as f:
reader = csv.reader(f)
for line in reader:
yield line
else:
raise Exception('Bad file type')
def write(self, path, content, **kwargs):
path = os.path.join(kwargs.get('root_path', ''), path)
content_type = kwargs.get('content_type', 'txt')
if content_type == 'txt':
with file(path, 'wb') as f:
f.write(content)
elif content_type == 'gz':
with gzip.open(path, 'w') as f:
f.write(content)
elif content_type == 'csv':
with open(path, 'wb') as f:
writer = csv.writer(f)
for c in content:
if c['type'] == 'single':
writer.writerow(c['data'])
elif c['type'] == 'multi':
writer.writerows(c['data'])
else:
raise Exception('Row type must be specified')
else:
raise Exception('Bad file type')
def exists(self, path):
return os.path.exists(path)
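# Illustrative usage sketch (Python 2, matching the file() builtin above;
# paths and row contents are placeholders):
#   f = File()
#   f.write('rows.csv', [{'type': 'single', 'data': ['a', 'b']}], content_type='csv')
#   for row in f.read('rows.csv', content_type='csv'):
#       print row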
|
alibozorgkhan/utils
|
utils/fileoo.py
|
Python
|
mit
| 1,696
|
import json
from django.core.serializers.json import DjangoJSONEncoder
from instanotifier.fetcher.rss.fetcher import fetch_rss_feed
def fetch_rss_url(url):
result = fetch_rss_feed(url)
return result
def write_to_file(filename, data):
json_data = json.dumps(data, cls=DjangoJSONEncoder)
with open(filename, "w") as f:
f.write(json_data)
def read_from_file(filename):
    # NOTE: on loading the RssNotification, json.loads deserializes the published_parsed field into an invalid format
with open(filename, "r") as f:
filedata = f.read()
json_data = json.loads(filedata)
return json_data
|
AlexanderKaluzhny/instanotifier
|
instanotifier/fetcher/rss/utils.py
|
Python
|
mit
| 635
|
''' Work of Cameron Palk '''
import sys
import pandas as pd
def main( argv ):
try:
csv_filepath = argv[ 0 ]
output_filepath = argv[ 1 ]
except IndexError:
print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
return
training_data = pd.read_csv( csv_filepath )
training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
    training_data = training_data.dropna()
print( training_data[ 'clean_Latitude' ] )
for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
print( "{:16} min: {:16} max: {:16}".format(
axis,
min( training_data[ axis ] ),
max( training_data[ axis ] )
) )
#
if __name__=='__main__':
main( sys.argv[ 1: ] )
|
CKPalk/SeattleCrime_DM
|
DataMining/Stats/coord_bounds.py
|
Python
|
mit
| 801
|
import my_data_file
d = my_data_file.my_data
print "Hello my name is %s and i am %d years of age and my coolnes is %d " % (d [ 'naam' ], d [ 'age' ], d ['coolheid'])
|
ArtezGDA/text-IO
|
Martijn/format.py
|
Python
|
mit
| 170
|
'''
Support for WM_TOUCH messages (Windows platform)
================================================
'''
__all__ = ('WM_MotionEventProvider', 'WM_MotionEvent')
import os
from kivy.input.providers.wm_common import WNDPROC, \
SetWindowLong_WndProc_wrapper, RECT, POINT, WM_TABLET_QUERYSYSTEMGESTURE, \
QUERYSYSTEMGESTURE_WNDPROC, WM_TOUCH, WM_MOUSEMOVE, WM_MOUSELAST, \
TOUCHINPUT, PEN_OR_TOUCH_MASK, PEN_OR_TOUCH_SIGNATURE, PEN_EVENT_TOUCH_MASK
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
Window = None
class WM_MotionEvent(MotionEvent):
'''MotionEvent representing the WM_MotionEvent event.
Supports pos, shape and size profiles.
'''
__attrs__ = ('size', )
def __init__(self, *args, **kwargs):
kwargs.setdefault('is_touch', True)
kwargs.setdefault('type_id', 'touch')
super().__init__(*args, **kwargs)
self.profile = ('pos', 'shape', 'size')
def depack(self, args):
self.shape = ShapeRect()
self.sx, self.sy = args[0], args[1]
self.shape.width = args[2][0]
self.shape.height = args[2][1]
self.size = self.shape.width * self.shape.height
super().depack(args)
def __str__(self):
args = (self.id, self.uid, str(self.spos), self.device)
return '<WMMotionEvent id:%d uid:%d pos:%s device:%s>' % args
if 'KIVY_DOC' in os.environ:
# documentation hack
WM_MotionEventProvider = None
else:
from ctypes.wintypes import HANDLE
from ctypes import (windll, sizeof, byref)
from collections import deque
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
class WM_MotionEventProvider(MotionEventProvider):
def start(self):
global Window
if not Window:
from kivy.core.window import Window
self.touch_events = deque()
self.touches = {}
self.uid = 0
# get window handle, and register to receive WM_TOUCH messages
self.hwnd = windll.user32.GetActiveWindow()
windll.user32.RegisterTouchWindow(self.hwnd, 1)
# inject our own wndProc to handle messages
# before window manager does
self.new_windProc = WNDPROC(self._touch_wndProc)
self.old_windProc = SetWindowLong_WndProc_wrapper(
self.hwnd, self.new_windProc)
def update(self, dispatch_fn):
c_rect = RECT()
windll.user32.GetClientRect(self.hwnd, byref(c_rect))
pt = POINT(x=0, y=0)
windll.user32.ClientToScreen(self.hwnd, byref(pt))
x_offset, y_offset = pt.x, pt.y
usable_w, usable_h = float(c_rect.w), float(c_rect.h)
while True:
try:
t = self.touch_events.pop()
                except IndexError:
break
# adjust x,y to window coordinates (0.0 to 1.0)
x = (t.screen_x() - x_offset) / usable_w
y = 1.0 - (t.screen_y() - y_offset) / usable_h
# actually dispatch input
if t.event_type == 'begin':
self.uid += 1
self.touches[t.id] = WM_MotionEvent(
self.device, self.uid, [x, y, t.size()])
dispatch_fn('begin', self.touches[t.id])
if t.event_type == 'update' and t.id in self.touches:
self.touches[t.id].move([x, y, t.size()])
dispatch_fn('update', self.touches[t.id])
if t.event_type == 'end' and t.id in self.touches:
touch = self.touches[t.id]
touch.move([x, y, t.size()])
touch.update_time_end()
dispatch_fn('end', touch)
del self.touches[t.id]
def stop(self):
windll.user32.UnregisterTouchWindow(self.hwnd)
self.new_windProc = SetWindowLong_WndProc_wrapper(
self.hwnd, self.old_windProc)
# we inject this wndProc into our main window, to process
# WM_TOUCH and mouse messages before the window manager does
def _touch_wndProc(self, hwnd, msg, wParam, lParam):
done = False
if msg == WM_TABLET_QUERYSYSTEMGESTURE:
return QUERYSYSTEMGESTURE_WNDPROC
if msg == WM_TOUCH:
done = self._touch_handler(msg, wParam, lParam)
if msg >= WM_MOUSEMOVE and msg <= WM_MOUSELAST:
done = self._mouse_handler(msg, wParam, lParam)
if not done:
return windll.user32.CallWindowProcW(self.old_windProc,
hwnd, msg, wParam,
lParam)
return 1
        # this one pushes WM_TOUCH messages onto our event stack
def _touch_handler(self, msg, wParam, lParam):
touches = (TOUCHINPUT * wParam)()
windll.user32.GetTouchInputInfo(HANDLE(lParam),
wParam,
touches,
sizeof(TOUCHINPUT))
for i in range(wParam):
self.touch_events.appendleft(touches[i])
windll.user32.CloseTouchInputHandle(HANDLE(lParam))
return True
# filter fake mouse events, because touch and stylus
# also make mouse events
def _mouse_handler(self, msg, wparam, lParam):
info = windll.user32.GetMessageExtraInfo()
            # it's a touch or a pen
if (info & PEN_OR_TOUCH_MASK) == PEN_OR_TOUCH_SIGNATURE:
if info & PEN_EVENT_TOUCH_MASK:
return True
MotionEventFactory.register('wm_touch', WM_MotionEventProvider)
|
kivy/kivy
|
kivy/input/providers/wm_touch.py
|
Python
|
mit
| 5,945
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from dr import app
if __name__ == '__main__':
app.run()
|
goll/flask_app
|
provisioning/files/wsgi.py
|
Python
|
mit
| 109
|
def fib_recursive(n):
"""[summary]
Computes the n-th fibonacci number recursive.
Problem: This implementation is very slow.
approximate O(2^n)
Arguments:
n {[int]} -- [description]
Returns:
[int] -- [description]
"""
# precondition
assert n >= 0, 'n must be a positive integer'
if n <= 1:
return n
else:
return fib_recursive(n-1) + fib_recursive(n-2)
# print(fib_recursive(35)) # => 9227465 (slow)
def fib_list(n):
"""[summary]
This algorithm computes the n-th fibbonacci number
very quick. approximate O(n)
The algorithm use dynamic programming.
Arguments:
n {[int]} -- [description]
Returns:
[int] -- [description]
"""
# precondition
assert n >= 0, 'n must be a positive integer'
list_results = [0, 1]
for i in range(2, n+1):
list_results.append(list_results[i-1] + list_results[i-2])
return list_results[n]
# print(fib_list(100)) # => 354224848179261915075
def fib_iter(n):
"""[summary]
Works iterative approximate O(n)
Arguments:
n {[int]} -- [description]
Returns:
[int] -- [description]
"""
# precondition
assert n >= 0, 'n must be positive integer'
fib_1 = 0
fib_2 = 1
sum = 0
if n <= 1:
return n
for i in range(n-1):
sum = fib_1 + fib_2
fib_1 = fib_2
fib_2 = sum
return sum
# => 354224848179261915075
# print(fib_iter(100))
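# Quick cross-check of the three implementations (added for illustration,
# not part of the original module): they should agree for small n.
if __name__ == '__main__':
    for n in (0, 1, 2, 10, 20):
        assert fib_recursive(n) == fib_list(n) == fib_iter(n)
    print(fib_list(100))  # => 354224848179261915075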
|
amaozhao/algorithms
|
algorithms/dp/fib.py
|
Python
|
mit
| 1,513
|
'''
The spawn function can also be used to run a program in the background. In the example below, an
optional mode parameter is added to the run function; when set to os.P_NOWAIT, the script does not
wait for the child program to finish, while the default os.P_WAIT makes spawn wait for the child
process to end. Other flag constants include os.P_OVERLAY, which makes spawn behave like exec,
and os.P_DETACH, which runs the child process in the background, detached from the current
console and keyboard focus.
'''
import os
import string
def run(program, *args, **kw):
# find executable
mode = kw.get("mode", os.P_WAIT)
for path in string.split(os.environ["PATH"], os.pathsep):
file = os.path.join(path, program) + ".exe"
try:
return os.spawnv(mode, file, (file,) + args)
except os.error:
pass
raise os.error, "cannot find executable"
run("python", "hello.py", mode=os.P_NOWAIT)
print "goodbye"
|
iamweilee/pylearn
|
os-spawn-example-2.py
|
Python
|
mit
| 778
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-21 13:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exdb', '0005_swap_type_subtype'),
]
operations = [
migrations.AddField(
model_name='experience',
name='temp_subtypes',
field=models.ManyToManyField(blank=True, related_name='experience_set', to='exdb.Subtype'),
),
]
|
mostateresnet/exdbproject
|
exdb/migrations/0006_experience_temp_subtypes.py
|
Python
|
mit
| 509
|
# i2c_esp.py Test program for asi2c.py
# Tests Responder on ESP8266
# The MIT License (MIT)
#
# Copyright (c) 2018 Peter Hinch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# pyb esp8266
# scl X9 - 0
# sda X10 - 2
# sync X11 - 5
# ack Y8 - 4
# gnd - gnd
import uasyncio as asyncio
from machine import Pin, I2C
import asi2c
import ujson
i2c = I2C(scl=Pin(0),sda=Pin(2)) # software I2C
syn = Pin(5)
ack = Pin(4)
chan = asi2c.Responder(i2c, syn, ack)
async def receiver():
sreader = asyncio.StreamReader(chan)
await chan.ready()
print('started')
for _ in range(5): # Test flow control
res = await sreader.readline()
print('Received', ujson.loads(res))
await asyncio.sleep(4)
while True:
res = await sreader.readline()
print('Received', ujson.loads(res))
async def sender():
swriter = asyncio.StreamWriter(chan, {})
txdata = [0, 0]
while True:
await swriter.awrite(''.join((ujson.dumps(txdata), '\n')))
txdata[1] += 1
await asyncio.sleep_ms(1500)
loop = asyncio.get_event_loop()
loop.create_task(receiver())
loop.create_task(sender())
try:
loop.run_forever()
finally:
chan.close() # for subsequent runs
|
peterhinch/micropython-async
|
v2/i2c/i2c_esp.py
|
Python
|
mit
| 2,245
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 14:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('type_page', '0004_auto_20170711_1241'),
]
operations = [
migrations.AddField(
model_name='footballtype',
name='comments',
field=models.CharField(max_length=128, null=True),
),
]
|
dumel93/project-
|
type_page/migrations/0005_footballtype_comments.py
|
Python
|
mit
| 471
|
'''
Created on May 20, 2012
@author: Jason Huang
'''
#Given an array of integers A, give an algorithm to find the longest arithmetic progression in it, i.e. find a sequence i1 < i2 < ... < ik, such that
#A[i1], A[i2], ..., A[ik] forms an arithmetic progression, and k is the largest possible.
#The sequence S1, S2, ..., Sk is called an arithmetic progression if
#Sj+1 - Sj is a constant
if __name__ == '__main__':
pass
|
jastination/software-engineering-excercise-repository
|
seer_python/dp/MaxArithmeticProgression.py
|
Python
|
mit
| 440
|
""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
|
alphagov/digitalmarketplace-api
|
migrations/versions/940_more_supplier_details.py
|
Python
|
mit
| 1,372
|
from chainer.iterators import multiprocess_iterator
from chainer.iterators import serial_iterator
MultiprocessIterator = multiprocess_iterator.MultiprocessIterator
SerialIterator = serial_iterator.SerialIterator
|
kikusu/chainer
|
chainer/iterators/__init__.py
|
Python
|
mit
| 214
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from RPSOM import Model
from RPSOM.transition_graph import output_graph
if __name__=='__main__':
# learning rate alpha setup
alpha_max = [0.1, 0.5, 0.7]
alpha_min = [0.01, 0.1, 0.2]
# neighborhood radius sigma setup
sigma_max = [5, 7, 10]
sigma_min = [1, 2, 3]
epochs = 10
# RPSOM model setup
    rpsom = Model.RPSOM(epochs, 15, 20, input_file="animal.csv", alpha_max=alpha_max, alpha_min=alpha_min, sigma_max=sigma_max, sigma_min=sigma_min, log_file="test.log")
#cb = [som.write_BMU for som in rpsom.som]
cb = None
# RPSOM train
    rpsom.fit(trainX=rpsom.input_x, epochs=rpsom.epochs, verbose=0, callbacks=cb)
# Output Map
# Output thickness map
rpsom.map_output2wrl_squ(grad=100, filename="test")
# Output grayscale 2D map
filename="example_animal"
rpsom.map_output2wrl_gray_squ(filename)
# Output transition graph
output_graph(rpsom)
    rpsom.weight_output_csv("rpsom_weight")
|
kinect110/RPSOM
|
src/examples/RPSOM_animal.py
|
Python
|
mit
| 941
|
# -*- coding:utf-8 -*-
""" Provide log related functions. You need to Initialize the logger and use the logger to make logs.
Example:
>>> logger = Initialize()
Use logger.level(\*msg) to log like:
>>> logger.error("Pickle data writing Failed.")
>>> logger.info("Pickle data of ", foo, " written successfully.")
The log will be stored into LogFile.log by default.
"""
__author__ = "Wang Hewen"
import sys
import logging
logging.currentframe = lambda: sys._getframe(5)
class Logger(logging.Logger):
    def debug(self, *args, **kwargs):
        super().debug("".join([str(arg) for arg in args]), **kwargs)
def info(self, *args, **kwargs):
super().info("".join([str(arg) for arg in args]), **kwargs)
def warning(self, *args, **kwargs):
super().warning("".join([str(arg) for arg in args]), **kwargs)
def warn(self, *args, **kwargs):
super().warn("".join([str(arg) for arg in args]), **kwargs)
def error(self, *args, **kwargs):
super().error("".join([str(arg) for arg in args]), **kwargs)
def exception(self, *args, exc_info=True, **kwargs):
super().exception("".join([str(arg) for arg in args]), exc_info = exc_info, **kwargs)
def critical(self, *args, **kwargs):
super().critical("".join([str(arg) for arg in args]), **kwargs)
def log(self, level, *args, **kwargs):
super().log(level, "".join([str(arg) for arg in args]), **kwargs)
    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
        super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)
def Initialize(FileName = "LogFile.log", LogLevel = "INFO", WriteToStream = False):
'''
Initialize loggers for logging. A logger will be returned.
:param String FileName: Path of the log file
:param String LogLevel: LogLevel of the logger, which can be "DEBUG", "INFO", "ERROR"
:param Boolean WriteToStream: Whether to write to stdout
:return: logger: The logger used for logging
    :rtype: logging.Logger
'''
if LogLevel not in ["DEBUG", "INFO", "ERROR"]:
raise ValueError("LogLevel is not correctly set.")
logging.Logger.manager.setLoggerClass(Logger)
logger = logging.getLogger(__name__) #__name__ == CommonModules.Log
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
fileHandler = logging.FileHandler(FileName)
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))
if LogLevel == "DEBUG":
streamHandler = logging.StreamHandler(stream = sys.stdout)
streamHandler.setLevel(logging.DEBUG)
fileHandler.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
if LogLevel == "INFO":
streamHandler = logging.StreamHandler(stream = sys.stdout)
streamHandler.setLevel(logging.INFO)
fileHandler.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
if LogLevel == "ERROR":
streamHandler = logging.StreamHandler(stream = sys.stderr)
streamHandler.setLevel(logging.ERROR)
fileHandler.setLevel(logging.ERROR)
logger.setLevel(logging.ERROR)
streamHandler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s', datefmt = '%Y/%m/%d %H:%M:%S'))
if WriteToStream:
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
return logger
|
wanghewen/CommonModules
|
CommonModules/Log.py
|
Python
|
mit
| 3,611
|
import json
from collections import abc
# item 26: use multiple inheritance for mixins only
# a mixin that transforms a Python object to a dictionary that's ready for serialization
class ToDictMixin(object):
def to_dict(self):
"""Return a dictionary representation of this object"""
return self._traverse('none', self.__dict__)
def _traverse(self, key, obj):
"""Return a dictionary representation of this obj"""
if isinstance(obj, ToDictMixin):
return obj.to_dict()
if isinstance(obj, dict):
return {k: self._traverse(k, v) for k, v in obj.items()}
if isinstance(obj, tuple) or isinstance(obj, list):
return [self._traverse(key, item) for item in obj]
# if it's any other object with __dict__ attr, use it!
if hasattr(obj, '__dict__'):
return self._traverse(key, obj.__dict__)
return obj
class BinaryTreeNode(ToDictMixin):
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class BinaryTreeWithParent(BinaryTreeNode):
def __init__(self, value, left=None, right=None, parent=None):
super().__init__(value, left, right)
self.parent = parent
# override so the backref to parent does not cause infinite recursion
def _traverse(self, key, obj):
# if the key is parent, stop the recursion and return parent's value instead
if key == 'parent' and isinstance(obj, BinaryTreeNode):
return obj.value
return super()._traverse(key, obj)
class NamedSubTree(ToDictMixin):
def __init__(self, name, tree):
self.name = name
self.tree = tree
# Mixins can also play together
class ToJsonMixin(object):
@classmethod
def from_json(cls, kwargs):
"""given kwargs in json format, get it into dictionary format"""
kwargs = json.loads(kwargs)
return cls(**kwargs)
def to_json(self):
d = self.to_dict()
return json.dumps(d)
class BinaryTreeWithJson(BinaryTreeNode, ToJsonMixin):
pass
class EqualityMixin(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Switch(EqualityMixin):
def __init__(self, ports, speed):
self.ports = ports
self.speed = speed
class Machine(EqualityMixin):
def __init__(self, ram, cpu, disk):
self.ram = ram
self.cpu = cpu
self.disk = disk
class DatacenterRack(ToJsonMixin, ToDictMixin, EqualityMixin):
def __init__(self, switch, machines):
self.switch = Switch(**switch)
self.machines = [Machine(**kwargs) for kwargs in machines]
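# Illustrative round-trip check (added for this write-up, not part of the
# original module): the mixins compose, so a DatacenterRack built from plain
# dicts can serialize to JSON, be rebuilt, and still compare equal.
if __name__ == '__main__':
    serialized = """{
        "switch": {"ports": 5, "speed": 1e9},
        "machines": [{"ram": 32e9, "cpu": 8, "disk": 5e12}]
    }"""
    rack = DatacenterRack.from_json(serialized)
    assert rack == DatacenterRack.from_json(rack.to_json())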
|
totoro72/pt1
|
ep/item_26_multiple_inheritance_for_mixin_only.py
|
Python
|
mit
| 2,705
|
from __future__ import print_function
from __future__ import absolute_import
import re
from pyingest.config.utils import u2asc
from .default import DefaultParser
from .author_names import AuthorNames
from .entity_convert import EntityConverter
head_dict = {'TITLE:': 'journal', 'NUMBER:': 'volume', 'SUBJECT:': 'title',
'DATE:': 'pubdate', 'FROM:': 'email'
}
class GCNCParser(DefaultParser):
def __init__(self, data):
# econv = EntityConverter()
# econv.input_text = data
# econv.convert()
# self.raw = econv.output_text
self.raw = data
self.data_dict = dict()
def make_pubdate(self):
input_date = self.data_dict['pubdate']
yymmdd = input_date.split('/')
if int(yymmdd[0]) > 50:
year = '19' + yymmdd[0]
else:
year = '20' + yymmdd[0]
pubdate = year + '/' + yymmdd[1]
self.data_dict['pubdate'] = pubdate
def make_bibcode(self):
year = self.data_dict['pubdate'][0:4]
bibcode = 'GCN.'
self.data_dict['volume'] = self.data_dict['volume'].ljust(5, '.')
volume = self.data_dict['volume'].ljust(9, '.') + '1'
try:
init = u2asc(self.data_dict['authors'][0][0])
except Exception as err:
print ("Problem generating author initial")
init = '.'
self.data_dict['bibcode'] = year + bibcode + volume + init
def make_publication(self):
base_string = 'GRB Coordinates Network, Circular Service, No. '
self.data_dict['publication'] = base_string + self.data_dict['volume']
self.data_dict['page'] = '1'
def split_authors_abstract(self):
# This could be used to extract affils and apply them to authors,
# but the process of doing so is unwieldy. I'm leaving code that
# was my initial try but commented out.
body = self.data_dict['abstract']
while body[0] == '':
body.pop(0)
auths = []
affils = []
while body[0] != '' and ':' not in body[0]:
auths.append(body.pop(0).strip())
auths.append(body.pop(0).strip())
auth_delimiter = u'| '
auth_string = ' '.join(auths)
auth_string = re.sub(r'\s+\((.*?)\)', ',', auth_string)
auth_string = re.sub(r'[ ,]and\s', ',', auth_string)
auth_string = re.sub(r'on behalf of', ',', auth_string)
auth_string = re.sub(r'reports?', ',', auth_string)
auth_string = re.sub(r'\s?:', '', auth_string)
auth_string = re.sub(r',?\s+,', ',', auth_string)
auth_array = [s.strip() for s in auth_string.split(',')]
auth_array = list([a for a in auth_array if len(a) > 3])
# auth_string = u'; '.join(auth_array)
auth_string = auth_delimiter.join(auth_array)
auth_mod = AuthorNames()
# self.data_dict['authors'] = auth_mod.parse(auth_string)
self.data_dict['authors'] = auth_mod.parse(auth_string, delimiter=auth_delimiter)
self.data_dict['authors'] = re.sub(r'\| ', u';', self.data_dict['authors'])
def parse(self):
self.data_dict = {}
# Start by looking at the Circular line by line...
try:
gdata = self.raw.split('\n')
# Header is fixed format and five lines long...
head = gdata[0:5]
for l in head:
lfix = l.replace(' ', '\t', 1)
lparts = lfix.split('\t')
self.data_dict[head_dict[lparts[0]]] = lparts[1].strip()
# Now you need to split the authors from the abstract.
# This should work in *most* cases, maybe not all,
# especially from older (pre-2016) Circulars
self.data_dict['abstract'] = gdata[5:]
self.split_authors_abstract()
# Authors and abstract content should now be defined
# If you want to try and keep fixed formatting
# (e.g. for tables), use '\n' for the join character
abstract_new = ' '.join(self.data_dict['abstract'])
self.data_dict['abstract'] = abstract_new.strip()
# Extract pubdate from the header date
self.make_pubdate()
# Create the bibcode from circular info
self.make_bibcode()
# Make the publication string
self.make_publication()
# Pass the necessary fields through EntityConverter
ec_fields = ['authors', 'abstract', 'title']
econv = EntityConverter()
for ecf in ec_fields:
econv.input_text = self.data_dict[ecf]
econv.convert()
self.data_dict[ecf] = econv.output_text
except Exception as err:
self.data_dict['raw'] = self.raw
self.data_dict['error'] = err
return self.data_dict
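# Illustrative usage sketch (not part of the original module; the filename is
# a placeholder for a raw GCN Circular text file):
#   with open('gcn_circular.txt') as f:
#       record = GCNCParser(f.read()).parse()
#   print(record.get('bibcode'))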
|
adsabs/adsabs-pyingest
|
pyingest/parsers/gcncirc.py
|
Python
|
mit
| 4,888
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_required
from pytz import common_timezones
import settings
from tekton import router
from tekton.gae.middleware.redirect import RedirectResponse
from gaepermission.decorator import login_not_required
@login_not_required
@no_csrf
def edit(_logged_user, name, user_locale, timezone):
if name:
_logged_user.name = name
_logged_user.locale = user_locale
_logged_user.timezone = timezone
_logged_user.put()
return RedirectResponse('/')
@login_not_required
@no_csrf
def index(_logged_user):
_logged_user.locale = _logged_user.locale or settings.DEFAULT_LOCALE
_logged_user.timezone = _logged_user.timezone or settings.DEFAULT_TIMEZONE
context = {'user': _logged_user,
'timezones': common_timezones,
'save_path': router.to_path(edit)}
return TemplateResponse(context, 'permission/account_form.html')
|
erikabarros/naguil
|
backend/appengine/routes/account.py
|
Python
|
mit
| 1,088
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Pythonic simple SOAP Client implementation"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2008 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.07a"
TIMEOUT = 60
import cPickle as pickle
import hashlib
import logging
import os
import tempfile
import urllib2
from urlparse import urlsplit
from simplexml import SimpleXMLElement, TYPE_MAP, REVERSE_TYPE_MAP, OrderedDict
from transport import get_http_wrapper, set_http_wrapper, get_Http
log = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
class SoapFault(RuntimeError):
def __init__(self,faultcode,faultstring):
self.faultcode = faultcode
self.faultstring = faultstring
RuntimeError.__init__(self, faultcode, faultstring)
def __str__(self):
return self.__unicode__().encode("ascii", "ignore")
def __unicode__(self):
return u'%s: %s' % (self.faultcode, self.faultstring)
def __repr__(self):
return u"SoapFault(%s, %s)" % (repr(self.faultcode),
repr(self.faultstring))
# soap protocol specification & namespace
soap_namespaces = dict(
soap11="http://schemas.xmlsoap.org/soap/envelope/",
soap="http://schemas.xmlsoap.org/soap/envelope/",
soapenv="http://schemas.xmlsoap.org/soap/envelope/",
soap12="http://www.w3.org/2003/05/soap-env",
)
_USE_GLOBAL_DEFAULT = object()
class SoapClient(object):
"Simple SOAP Client (simil PHP)"
def __init__(self, location = None, action = None, namespace = None,
cert = None, trace = False, exceptions = True, proxy = None, ns=False,
soap_ns=None, wsdl = None, cache = False, cacert=None,
sessions=False, soap_server=None, timeout=_USE_GLOBAL_DEFAULT,
http_headers={}
):
"""
:param http_headers: Additional HTTP Headers; example: {'Host': 'ipsec.example.com'}
"""
self.certssl = cert
self.keyssl = None
self.location = location # server location (url)
self.action = action # SOAP base action
self.namespace = namespace # message
self.trace = trace # show debug messages
        self.exceptions = exceptions # raise exceptions? (SOAP faults)
self.xml_request = self.xml_response = ''
self.http_headers = http_headers
if not soap_ns and not ns:
self.__soap_ns = 'soap' # 1.1
elif not soap_ns and ns:
self.__soap_ns = 'soapenv' # 1.2
else:
self.__soap_ns = soap_ns
# SOAP Server (special cases like oracle or jbossas6)
self.__soap_server = soap_server
# SOAP Header support
self.__headers = {} # general headers
self.__call_headers = None # OrderedDict to be marshalled for RPC Call
# check if the Certification Authority Cert is a string and store it
if cacert and cacert.startswith("-----BEGIN CERTIFICATE-----"):
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w+b', -1)
if self.trace: log.info(u"Saving CA certificate to %s" % filename)
f.write(cacert)
cacert = filename
f.close()
self.cacert = cacert
        if timeout is _USE_GLOBAL_DEFAULT:
            timeout = TIMEOUT
# Create HTTP wrapper
Http = get_Http()
self.http = Http(timeout=timeout, cacert=cacert, proxy=proxy, sessions=sessions)
self.__ns = ns # namespace prefix or False to not use it
if not ns:
self.__xml = """<?xml version="1.0" encoding="UTF-8"?>
<%(soap_ns)s:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:%(soap_ns)s="%(soap_uri)s">
<%(soap_ns)s:Header/>
<%(soap_ns)s:Body>
<%(method)s xmlns="%(namespace)s">
</%(method)s>
</%(soap_ns)s:Body>
</%(soap_ns)s:Envelope>"""
else:
self.__xml = """<?xml version="1.0" encoding="UTF-8"?>
<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s" xmlns:%(ns)s="%(namespace)s">
<%(soap_ns)s:Header/>
<%(soap_ns)s:Body>
<%(ns)s:%(method)s>
</%(ns)s:%(method)s>
</%(soap_ns)s:Body>
</%(soap_ns)s:Envelope>"""
# parse wsdl url
self.services = wsdl and self.wsdl_parse(wsdl, debug=trace, cache=cache)
self.service_port = None # service port for late binding
def __getattr__(self, attr):
"Return a pseudo-method that can be called"
if not self.services: # not using WSDL?
return lambda self=self, *args, **kwargs: self.call(attr,*args,**kwargs)
else: # using WSDL:
return lambda *args, **kwargs: self.wsdl_call(attr,*args,**kwargs)
def call(self, method, *args, **kwargs):
"""Prepare xml request and make SOAP call, returning a SimpleXMLElement.
If a keyword argument called "headers" is passed with a value of a
SimpleXMLElement object, then these headers will be inserted into the
request.
"""
#TODO: method != input_message
# Basic SOAP request:
xml = self.__xml % dict(method=method, namespace=self.namespace, ns=self.__ns,
soap_ns=self.__soap_ns, soap_uri=soap_namespaces[self.__soap_ns])
request = SimpleXMLElement(xml,namespace=self.__ns and self.namespace, prefix=self.__ns)
try:
request_headers = kwargs.pop('headers')
except KeyError:
request_headers = None
# serialize parameters
if kwargs:
parameters = kwargs.items()
else:
parameters = args
if parameters and isinstance(parameters[0], SimpleXMLElement):
# merge xmlelement parameter ("raw" - already marshalled)
if parameters[0].children() is not None:
for param in parameters[0].children():
getattr(request,method).import_node(param)
elif parameters:
# marshall parameters:
for k,v in parameters: # dict: tag=valor
getattr(request,method).marshall(k,v)
elif not self.__soap_server in ('oracle', ) or self.__soap_server in ('jbossas6',):
# JBossAS-6 requires no empty method parameters!
delattr(request("Body", ns=soap_namespaces.values(),), method)
# construct header and parameters (if not wsdl given) except wsse
if self.__headers and not self.services:
self.__call_headers = dict([(k, v) for k, v in self.__headers.items()
if not k.startswith("wsse:")])
# always extract WS Security header and send it
if 'wsse:Security' in self.__headers:
#TODO: namespaces too hardwired, clean-up...
header = request('Header' , ns=soap_namespaces.values(),)
k = 'wsse:Security'
v = self.__headers[k]
header.marshall(k, v, ns=False, add_children_ns=False)
header(k)['xmlns:wsse'] = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd'
#<wsse:UsernameToken xmlns:wsu='http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd'>
if self.__call_headers:
header = request('Header' , ns=soap_namespaces.values(),)
for k, v in self.__call_headers.items():
##if not self.__ns:
## header['xmlns']
header.marshall(k, v, ns=self.__ns, add_children_ns=False)
if request_headers:
header = request('Header' , ns=soap_namespaces.values(),)
for subheader in request_headers.children():
header.import_node(subheader)
self.xml_request = request.as_xml()
self.xml_response = self.send(method, self.xml_request)
response = SimpleXMLElement(self.xml_response, namespace=self.namespace)
if self.exceptions and response("Fault", ns=soap_namespaces.values(), error=False):
raise SoapFault(unicode(response.faultcode), unicode(response.faultstring))
return response
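    # Illustrative raw-header call (sketch, not from the original source;
    # element names are placeholders): a SimpleXMLElement passed as the
    # "headers" keyword is copied verbatim into the SOAP Header.
    #   header = SimpleXMLElement('<Headers><AuthToken>t0k3n</AuthToken></Headers>')
    #   client.call('GetUser', userId=1, headers=header)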
def send(self, method, xml):
"Send SOAP request using HTTP"
if self.location == 'test': return
# location = "%s" % self.location #?op=%s" % (self.location, method)
location = self.location
if self.services:
soap_action = self.action
else:
soap_action = self.action + method
headers={
'Content-type': 'text/xml; charset="UTF-8"',
'Content-length': str(len(xml)),
"SOAPAction": "\"%s\"" % (soap_action)
}
headers.update(self.http_headers)
log.info("POST %s" % location)
log.info("Headers: %s" % headers)
if self.trace:
print "-"*80
print "POST %s" % location
print '\n'.join(["%s: %s" % (k,v) for k,v in headers.items()])
print u"\n%s" % xml.decode("utf8","ignore")
response, content = self.http.request(
location, "POST", body=xml, headers=headers)
self.response = response
self.content = content
if self.trace:
print
print '\n'.join(["%s: %s" % (k,v) for k,v in response.items()])
print content#.decode("utf8","ignore")
print "="*80
return content
def get_operation(self, method):
# try to find operation in wsdl file
soap_ver = self.__soap_ns == 'soap12' and 'soap12' or 'soap11'
if not self.service_port:
for service_name, service in self.services.items():
for port_name, port in [port for port in service['ports'].items()]:
if port['soap_ver'] == soap_ver:
self.service_port = service_name, port_name
break
else:
raise RuntimeError("Cannot determine service in WSDL: "
"SOAP version: %s" % soap_ver)
else:
port = self.services[self.service_port[0]]['ports'][self.service_port[1]]
self.location = port['location']
operation = port['operations'].get(unicode(method))
if not operation:
raise RuntimeError("Operation %s not found in WSDL: "
"Service/Port Type: %s" %
(method, self.service_port))
return operation
def wsdl_call(self, method, *args, **kwargs):
"Pre and post process SOAP call, input and output parameters using WSDL"
soap_uri = soap_namespaces[self.__soap_ns]
operation = self.get_operation(method)
# get i/o type declarations:
input = operation['input']
output = operation['output']
header = operation.get('header')
if 'action' in operation:
self.action = operation['action']
# sort parameters (same order as xsd:sequence)
def sort_dict(od, d):
if isinstance(od, dict):
ret = OrderedDict()
for k in od.keys():
v = d.get(k)
# don't append null tags!
if v is not None:
if isinstance(v, dict):
v = sort_dict(od[k], v)
elif isinstance(v, list):
v = [sort_dict(od[k][0], v1)
for v1 in v]
ret[str(k)] = v
return ret
else:
return d
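        # Example of sort_dict (hypothetical values): given the declared order
        # od = OrderedDict([('a', int), ('b', str)]) and d = {'b': 'x', 'a': 1},
        # it returns OrderedDict([('a', 1), ('b', 'x')]), so the parameters
        # follow the xsd:sequence order expected by the service.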
# construct header and parameters
if header:
self.__call_headers = sort_dict(header, self.__headers)
if input and args:
# convert positional parameters to named parameters:
d = [(k, arg) for k, arg in zip(input.values()[0].keys(), args)]
kwargs.update(dict(d))
if input and kwargs:
params = sort_dict(input.values()[0], kwargs).items()
            if self.__soap_server == "axis":
                pass  # axis: use the operation name as-is
            else:
                # use the message (element) name
                method = input.keys()[0]
#elif not input:
#TODO: no message! (see wsmtxca.dummy)
else:
params = kwargs and kwargs.items()
# call remote procedure
response = self.call(method, *params)
# parse results:
resp = response('Body',ns=soap_uri).children().unmarshall(output)
return resp and resp.values()[0] # pass Response tag children
def help(self, method):
"Return operation documentation and invocation/returned value example"
operation = self.get_operation(method)
input = operation.get('input')
input = input and input.values() and input.values()[0]
if isinstance(input, dict):
input = ", ".join("%s=%s" % (k,repr(v)) for k,v
in input.items())
elif isinstance(input, list):
input = repr(input)
output = operation.get('output')
if output:
output = operation['output'].values()[0]
        headers = operation.get('header') or None
return u"%s(%s)\n -> %s:\n\n%s\nHeaders: %s" % (
method,
input or "",
output and output or "",
operation.get("documentation",""),
headers,
)
def wsdl_parse(self, url, debug=False, cache=False):
"Parse Web Service Description v1.1"
log.debug("wsdl url: %s" % url)
# Try to load a previously parsed wsdl:
force_download = False
if cache:
# make md5 hash of the url for caching...
filename_pkl = "%s.pkl" % hashlib.md5(url).hexdigest()
if isinstance(cache, basestring):
filename_pkl = os.path.join(cache, filename_pkl)
if os.path.exists(filename_pkl):
log.debug("Unpickle file %s" % (filename_pkl, ))
f = open(filename_pkl, "r")
pkl = pickle.load(f)
f.close()
# sanity check:
if pkl['version'][:-1] != __version__.split(" ")[0][:-1] or pkl['url'] != url:
import warnings
warnings.warn('version or url mismatch! discarding cached wsdl', RuntimeWarning)
if debug:
log.debug('Version: %s %s' % (pkl['version'], __version__))
log.debug('URL: %s %s' % (pkl['url'], url))
force_download = True
else:
self.namespace = pkl['namespace']
self.documentation = pkl['documentation']
return pkl['services']
soap_ns = {
"http://schemas.xmlsoap.org/wsdl/soap/": 'soap11',
"http://schemas.xmlsoap.org/wsdl/soap12/": 'soap12',
}
wsdl_uri="http://schemas.xmlsoap.org/wsdl/"
xsd_uri="http://www.w3.org/2001/XMLSchema"
xsi_uri="http://www.w3.org/2001/XMLSchema-instance"
get_local_name = lambda s: s and str((':' in s) and s.split(':')[1] or s)
get_namespace_prefix = lambda s: s and str((':' in s) and s.split(':')[0] or None)
# always return an unicode object:
REVERSE_TYPE_MAP[u'string'] = unicode
def fetch(url):
"Download a document from a URL, save it locally if cache enabled"
# check / append a valid schema if not given:
url_scheme, netloc, path, query, fragment = urlsplit(url)
if not url_scheme in ('http','https', 'file'):
for scheme in ('http','https', 'file'):
try:
if not url.startswith("/") and scheme in ('http', 'https'):
tmp_url = "%s://%s" % (scheme, url)
else:
tmp_url = "%s:%s" % (scheme, url)
if debug: log.debug("Scheme not found, trying %s" % scheme)
return fetch(tmp_url)
except Exception, e:
log.error(e)
raise RuntimeError("No scheme given for url: %s" % url)
# make md5 hash of the url for caching...
filename = "%s.xml" % hashlib.md5(url).hexdigest()
if isinstance(cache, basestring):
filename = os.path.join(cache, filename)
if cache and os.path.exists(filename) and not force_download:
log.info("Reading file %s" % (filename, ))
f = open(filename, "r")
xml = f.read()
f.close()
else:
if url_scheme == 'file':
log.info("Fetching url %s using urllib2" % (url, ))
f = urllib2.urlopen(url)
xml = f.read()
else:
log.info("GET %s using %s" % (url, self.http._wrapper_version))
response, xml = self.http.request(url, "GET", None, {})
if cache:
log.info("Writing file %s" % (filename, ))
if not os.path.isdir(cache):
os.makedirs(cache)
f = open(filename, "w")
f.write(xml)
f.close()
return xml
# Open uri and read xml:
xml = fetch(url)
# Parse WSDL XML:
wsdl = SimpleXMLElement(xml, namespace=wsdl_uri)
# detect soap prefix and uri (xmlns attributes of <definitions>)
xsd_ns = None
soap_uris = {}
for k, v in wsdl[:]:
if v in soap_ns and k.startswith("xmlns:"):
soap_uris[get_local_name(k)] = v
if v== xsd_uri and k.startswith("xmlns:"):
xsd_ns = get_local_name(k)
# Extract useful data:
self.namespace = wsdl['targetNamespace']
self.documentation = unicode(wsdl('documentation', error=False) or '')
services = {}
bindings = {} # binding_name: binding
operations = {} # operation_name: operation
port_type_bindings = {} # port_type_name: binding
messages = {} # message: element
elements = {} # element: type def
for service in wsdl.service:
service_name=service['name']
if not service_name:
continue # empty service?
if debug: log.debug("Processing service %s" % service_name)
serv = services.setdefault(service_name, {'ports': {}})
serv['documentation']=service['documentation'] or ''
for port in service.port:
binding_name = get_local_name(port['binding'])
address = port('address', ns=soap_uris.values(), error=False)
location = address and address['location'] or None
soap_uri = address and soap_uris.get(address.get_prefix())
soap_ver = soap_uri and soap_ns.get(soap_uri)
bindings[binding_name] = {'service_name': service_name,
'location': location,
'soap_uri': soap_uri, 'soap_ver': soap_ver,
}
serv['ports'][port['name']] = bindings[binding_name]
for binding in wsdl.binding:
binding_name = binding['name']
if debug: log.debug("Processing binding %s" % service_name)
soap_binding = binding('binding', ns=soap_uris.values(), error=False)
transport = soap_binding and soap_binding['transport'] or None
port_type_name = get_local_name(binding['type'])
bindings[binding_name].update({
'port_type_name': port_type_name,
'transport': transport, 'operations': {},
})
port_type_bindings[port_type_name] = bindings[binding_name]
for operation in binding.operation:
op_name = operation['name']
op = operation('operation',ns=soap_uris.values(), error=False)
action = op and op['soapAction']
d = operations.setdefault(op_name, {})
bindings[binding_name]['operations'][op_name] = d
d.update({'name': op_name})
d['parts'] = {}
                # input and/or output may be absent!
input = operation('input', error=False)
body = input and input('body', ns=soap_uris.values(), error=False)
d['parts']['input_body'] = body and body['parts'] or None
output = operation('output', error=False)
body = output and output('body', ns=soap_uris.values(), error=False)
d['parts']['output_body'] = body and body['parts'] or None
header = input and input('header', ns=soap_uris.values(), error=False)
d['parts']['input_header'] = header and {'message': header['message'], 'part': header['part']} or None
headers = output and output('header', ns=soap_uris.values(), error=False)
                d['parts']['output_header'] = headers and {'message': headers['message'], 'part': headers['part']} or None
                #TODO: separate operation_binding from operation
if action:
d["action"] = action
def make_key(element_name, element_type):
"return a suitable key for elements"
# only distinguish 'element' vs other types
if element_type in ('complexType', 'simpleType'):
eltype = 'complexType'
else:
eltype = element_type
if eltype not in ('element', 'complexType', 'simpleType'):
raise RuntimeError("Unknown element type %s = %s" % (unicode(element_name), eltype))
return (unicode(element_name), eltype)
#TODO: cleanup element/schema/types parsing:
def process_element(element_name, node, element_type):
"Parse and define simple element types"
if debug:
log.debug("Processing element %s %s" % (element_name, element_type))
for tag in node:
if tag.get_local_name() in ("annotation", "documentation"):
continue
elif tag.get_local_name() in ('element', 'restriction'):
if debug: log.debug("%s has not children! %s" % (element_name,tag))
children = tag # element "alias"?
alias = True
elif tag.children():
children = tag.children()
alias = False
else:
if debug: log.debug("%s has not children! %s" % (element_name,tag))
continue #TODO: abstract?
d = OrderedDict()
for e in children:
t = e['type']
if not t:
t = e['base'] # complexContent (extension)!
if not t:
t = 'anyType' # no type given!
t = t.split(":")
if len(t)>1:
ns, type_name = t
else:
ns, type_name = None, t[0]
if element_name == type_name:
                        pass  ## warning: possible infinite recursion
uri = ns and e.get_namespace_uri(ns) or xsd_uri
if uri==xsd_uri:
# look for the type, None == any
fn = REVERSE_TYPE_MAP.get(unicode(type_name), None)
else:
fn = None
if not fn:
# simple / complex type, postprocess later
fn = elements.setdefault(make_key(type_name, "complexType"), OrderedDict())
if e['name'] is not None and not alias:
e_name = unicode(e['name'])
d[e_name] = fn
else:
if debug: log.debug("complexConent/simpleType/element %s = %s" % (element_name, type_name))
d[None] = fn
if e['maxOccurs']=="unbounded" or (ns == 'SOAP-ENC' and type_name == 'Array'):
# it's an array... TODO: compound arrays?
d.array = True
if e is not None and e.get_local_name() == 'extension' and e.children():
# extend base element:
process_element(element_name, e.children(), element_type)
elements.setdefault(make_key(element_name, element_type), OrderedDict()).update(d)
# check axis2 namespace at schema types attributes
self.namespace = dict(wsdl.types("schema", ns=xsd_uri)[:]).get('targetNamespace', self.namespace)
imported_schemas = {}
def preprocess_schema(schema):
"Find schema elements and complex types"
for element in schema.children() or []:
if element.get_local_name() in ('import', ):
schema_namespace = element['namespace']
schema_location = element['schemaLocation']
if schema_location is None:
if debug: log.debug("Schema location not provided for %s!" % (schema_namespace, ))
continue
if schema_location in imported_schemas:
if debug: log.debug("Schema %s already imported!" % (schema_location, ))
continue
imported_schemas[schema_location] = schema_namespace
if debug: print "Importing schema %s from %s" % (schema_namespace, schema_location)
# Open uri and read xml:
xml = fetch(schema_location)
# Parse imported XML schema (recursively):
imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)
preprocess_schema(imported_schema)
element_type = element.get_local_name()
if element_type in ('element', 'complexType', "simpleType"):
element_name = unicode(element['name'])
if debug: log.debug("Parsing Element %s: %s" % (element_type, element_name))
if element.get_local_name() == 'complexType':
children = element.children()
elif element.get_local_name() == 'simpleType':
children = element("restriction", ns=xsd_uri)
elif element.get_local_name() == 'element' and element['type']:
children = element
else:
children = element.children()
if children:
children = children.children()
elif element.get_local_name() == 'element':
children = element
if children:
process_element(element_name, children, element_type)
def postprocess_element(elements):
"Fix unresolved references (elements referenced before its definition, thanks .net)"
for k,v in elements.items():
if isinstance(v, OrderedDict):
if v.array:
elements[k] = [v] # convert arrays to python lists
if v!=elements: #TODO: fix recursive elements
postprocess_element(v)
if None in v and v[None]: # extension base?
if isinstance(v[None], dict):
for i, kk in enumerate(v[None]):
                                # extend base -keep original order-
if v[None] is not None:
elements[k].insert(kk, v[None][kk], i)
del v[None]
else: # "alias", just replace
if debug: log.debug("Replacing %s = %s" % (k, v[None]))
elements[k] = v[None]
#break
if isinstance(v, list):
for n in v: # recurse list
postprocess_element(n)
# process current wsdl schema:
for schema in wsdl.types("schema", ns=xsd_uri):
preprocess_schema(schema)
postprocess_element(elements)
for message in wsdl.message:
if debug: log.debug("Processing message %s" % message['name'])
for part in message('part', error=False) or []:
element = {}
element_name = part['element']
if not element_name:
# some implementations (axis) uses type instead
element_name = part['type']
type_ns = get_namespace_prefix(element_name)
type_uri = wsdl.get_namespace_uri(type_ns)
if type_uri == xsd_uri:
element_name = get_local_name(element_name)
fn = REVERSE_TYPE_MAP.get(unicode(element_name), None)
element = {part['name']: fn}
# emulate a true Element (complexType)
messages.setdefault((message['name'], None), {message['name']: OrderedDict()}).values()[0].update(element)
else:
element_name = get_local_name(element_name)
fn = elements.get(make_key(element_name, 'element'))
if not fn:
# some axis servers uses complexType for part messages
fn = elements.get(make_key(element_name, 'complexType'))
element = {message['name']: {part['name']: fn}}
else:
element = {element_name: fn}
messages[(message['name'], part['name'])] = element
def get_message(message_name, part_name):
if part_name:
# get the specific part of the message:
return messages.get((message_name, part_name))
else:
# get the first part for the specified message:
for (message_name_key, part_name_key), message in messages.items():
if message_name_key == message_name:
return message
for port_type in wsdl.portType:
port_type_name = port_type['name']
if debug: log.debug("Processing port type %s" % port_type_name)
binding = port_type_bindings[port_type_name]
for operation in port_type.operation:
op_name = operation['name']
op = operations[op_name]
op['documentation'] = unicode(operation('documentation', error=False) or '')
if binding['soap_ver']:
                    #TODO: separate operation_binding from operation (non SOAP?)
if operation("input", error=False):
input_msg = get_local_name(operation.input['message'])
input_header = op['parts'].get('input_header')
if input_header:
header_msg = get_local_name(input_header.get('message'))
header_part = get_local_name(input_header.get('part'))
# warning: some implementations use a separate message!
header = get_message(header_msg or input_msg, header_part)
else:
                            header = None # not enough info to search for the header message
op['input'] = get_message(input_msg, op['parts'].get('input_body'))
op['header'] = header
else:
op['input'] = None
op['header'] = None
if operation("output", error=False):
output_msg = get_local_name(operation.output['message'])
op['output'] = get_message(output_msg, op['parts'].get('output_body'))
else:
op['output'] = None
if debug:
import pprint
log.debug(pprint.pformat(services))
# Save parsed wsdl (cache)
if cache:
f = open(filename_pkl, "wb")
pkl = {
'version': __version__.split(" ")[0],
'url': url,
'namespace': self.namespace,
'documentation': self.documentation,
'services': services,
}
pickle.dump(pkl, f)
f.close()
return services
def __setitem__(self, item, value):
"Set SOAP Header value - this header will be sent for every request."
self.__headers[item] = value
def close(self):
"Finish the connection and remove temp files"
self.http.close()
if self.cacert.startswith(tempfile.gettempdir()):
if self.trace: log.info("removing %s" % self.cacert)
os.unlink(self.cacert)
def parse_proxy(proxy_str):
"Parses proxy address user:pass@host:port into a dict suitable for httplib2"
if isinstance(proxy_str, unicode):
proxy_str = proxy_str.encode("utf8")
proxy_dict = {}
if proxy_str is None:
return
if "@" in proxy_str:
user_pass, host_port = proxy_str.split("@")
else:
user_pass, host_port = "", proxy_str
if ":" in host_port:
host, port = host_port.split(":")
proxy_dict['proxy_host'], proxy_dict['proxy_port'] = host, int(port)
if ":" in user_pass:
proxy_dict['proxy_user'], proxy_dict['proxy_pass'] = user_pass.split(":")
return proxy_dict
if __name__ == "__main__":
pass
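    # Hypothetical smoke test for parse_proxy (host and credentials below are
    # illustrative only, not part of the original module):
    assert parse_proxy("john:doe@proxy.example.com:3128") == {
        'proxy_host': 'proxy.example.com', 'proxy_port': 3128,
        'proxy_user': 'john', 'proxy_pass': 'doe'}
    assert parse_proxy("proxy.example.com:3128") == {
        'proxy_host': 'proxy.example.com', 'proxy_port': 3128}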
|
ccpgames/eve-metrics
|
web2py/gluon/contrib/pysimplesoap/client.py
|
Python
|
mit
| 36,225
|
from setuptools import setup, find_packages
setup(name='MODEL1310110042',
      version='20140916',
description='MODEL1310110042 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1310110042',
maintainer='Stanley Gu',
      maintainer_email='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
biomodels/MODEL1310110042
|
setup.py
|
Python
|
cc0-1.0
| 377
|
#!/usr/bin/python
import json
from pprint import pprint
import re
import urllib
import time
from geopy import geocoders
import Image
import os
# TODO: handle test cases
# testcases:
# hollywood & vine, hollywood and vine
# order of operations: hashtag, img, address, other text.
# hashtag allcaps or lowercase
# uploaded image, link to hosted image
# multiple urls? currently hard-coded to only accept the first url seen. probably best this way.
class TwitterJsonParser():
# parser useful fields from file of json tweet objects
def get_data_from_tweets(self, input_data):
g = geocoders.GoogleV3()
tweet_data = []
processed_tweets = []
with open(input_data) as f:
for line in f:
if line.strip():
tweet_data = json.loads(line)
tweet = tweet_data["text"]
# scrub out any @mentions or #hashtags to leave behind address / text
tweet_text = ' '.join(re.sub("(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|(\w+:\/\/\S+)"," ",tweet).split())
# geocode address to lat/long
address, (lat, lng) = g.geocode(tweet_text)
# TODO: this is a good place to validate the address for an LA coordinate.
# if not LA, toss in a bucket to be human-examined
# img uploaded via twitter
if tweet_data["entities"].get('media'):
print "DEBUG: img uploaded"
img_url = tweet_data["entities"]["media"][0]["media_url"]
# if img passed as url
else:
print "DEBUG: img as url"
img_url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', tweet)[0]
print("tweet: %s") % tweet
print("tweet_text: %s, img_url: %s") % (tweet_text, img_url)
print("address: %s, lat: %s, lng: %s") % (address, lat, lng)
self.save_img_from_tweet(str(lat), str(lng), img_url)
processed_tweets.extend([address, str(lat), str(lng), img_url])
return processed_tweets
# this is run on one tweet at a time
def save_img_from_tweet(self, lat, lng, img_url):
DIR_FINISHED_IMGS = '../data_finished_images'
IMG_NAME = lat + '_' + lng + '_.PNG'
        if not os.path.isfile(DIR_FINISHED_IMGS + '/' + IMG_NAME):
# save url to disk with address as filename
try:
                urllib.urlretrieve(img_url, DIR_FINISHED_IMGS + '/' + IMG_NAME)
print("Saved: %s" % DIR_FINISHED_IMGS + '/' + IMG_NAME)
except IOError, e:
print 'could not retrieve %s' % IMG_NAME
try:
im = Image.open(DIR_FINISHED_IMGS + '/' + IMG_NAME)
# TODO: need to figure out what thumbnail size looks best on projector
im2 = im.resize((40, 40), Image.NEAREST)
im2.save(DIR_FINISHED_IMGS + '/thumb_' + IMG_NAME)
except IOError, e:
                print 'could not open, resize and save %s' % IMG_NAME
time.sleep(1.5)
print("--------------------------------------------------------") # DEBUG
else:
print("file already exists. Skipping %s") % DIR_FINISHED_IMGS + '/' + IMG_NAME
return
return
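if __name__ == '__main__':
    # Hypothetical usage sketch: the input file name below is illustrative,
    # not part of the original script.
    parser = TwitterJsonParser()
    print parser.get_data_from_tweets('tweets.json')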
|
levisimons/CRASHLACMA
|
CRASHLACMA/twitter_json_parser.py
|
Python
|
cc0-1.0
| 2,983
|
# coding=utf-8
from app import mongo_utils
from bson import json_util
from flask import Blueprint, render_template, request, Response,session
import json
import time
from datetime import datetime
from operator import itemgetter
mod_main = Blueprint('main', __name__)
@mod_main.route('/', methods=['GET'])
def index():
project_enabled = mongo_utils.get_enabled_project()
timestamp = int(time.mktime(datetime.now().timetuple()))
session['user_id'] = timestamp
user_id = session['user_id']
year=2017
for project in json.loads(json_util.dumps(project_enabled)):
year=project['year']
docs = mongo_utils.find_all(project['year'])
count_questions = mongo_utils.get_nr_questions_front(project['year'])
questions = mongo_utils.find_all_questions(project['year'])
date=datetime.utcnow()
mongo_utils.insert_user_session(user_id,year,date)
return render_template('mod_main/index.html', docs=json.loads(json_util.dumps(docs)),questions=json.loads(json_util.dumps(questions)), count_questions=count_questions,user_id=user_id)
@mod_main.route('/results/<int:user_id>', methods=['GET'])
def results(user_id):
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
docs = mongo_utils.find_all(project['year'])
results=mongo_utils.find_user_session_answers(project['year'],user_id)
countquestions=0
number_of_questions = len(json.loads(json_util.dumps(results['all_question'])))
candidates = json.loads(json_util.dumps(results['candidate_results']))
users = json.loads(json_util.dumps(results['user_results']))
candidates_array = []
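        # Matching logic: for every question, compare each candidate's answer
        # triple (answer, status, importance/"vazno") against the user's
        # triple; a candidate earns one match point per identical triple.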
while countquestions <= number_of_questions:
for candidate in candidates:
candidate_match = 0
for user in users:
if 'question_'+str(countquestions) in candidate and 'question_'+str(countquestions) in user:
if 'vazno_' + str(countquestions) in candidate and 'vazno_' + str(countquestions) in user:
candidate_question_value = candidate['question_' + str(countquestions)]
candidate_status_value = candidate['status_' + str(countquestions)]
candidate_vazno_value = candidate['vazno_' + str(countquestions)]
user_question_value = user['question_' + str(countquestions)]
user_status_value = user['status_' + str(countquestions)]
user_vazno_value = user['vazno_' + str(countquestions)]
if candidate_vazno_value == user_vazno_value and candidate_status_value == user_status_value and candidate_question_value == user_question_value:
candidate_match += 1
candidates_array.append({
"candidate_slug": candidate['candidate_slug'],
'question': candidate_question_value,
'status': candidate_status_value,
'vazno': candidate_vazno_value,
"matchcount": candidate_match,
})
countquestions += 1
candidates_percentages = []
for candidate in json.loads(json_util.dumps(results['candidates'])):
percentage = 0
count_match=0
for c_a in candidates_array:
if candidate['generated_id']==c_a['candidate_slug']:
count_match += 1
percentage = (float(count_match)/ number_of_questions) * 100
candidates_percentages.append({
'candidate_name':candidate['candidate_name'],
'percentage':percentage,
'candidate_biography':candidate['candidate_biography'],
'image':candidate['image']
})
sorted_c_array=sorted(candidates_percentages, key=itemgetter('percentage'),reverse=True)
return render_template('mod_main/results.html', docs=json.loads(json_util.dumps(results)),results=json.loads(json_util.dumps(sorted_c_array)),user_id=user_id)
@mod_main.route('/insertuseranswers', methods=['GET', "POST"])
def insert_user_answers():
if request.method == 'POST':
data = request.form.to_dict()
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
docs = mongo_utils.find_all(project['year'])
data['project_slug']=project['year']
data['user_id'] =session['user_id']
data['timestamp'] = datetime.utcnow()
result=mongo_utils.insert_users_answers(data)
#return render_template('mod_main/user_candidate_results.html.html', docs=json.loads(json_util.dumps(docs)), questions=json.loads(json_util.dumps(questions)), count_questions=count_questions)
return Response(response=json_util.dumps(result), status=200, mimetype='application/json')
@mod_main.route('/getuseranswerresults', methods=['GET', "POST"])
def get_user_answers_results():
if request.method == 'POST':
data = request.form.to_dict()
user_id = data['user_id']
project_year=""
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
docs = mongo_utils.find_all(project['year'])
project_year=project['year']
result = mongo_utils.find_user_session_answers(project_year,user_id)
return Response(response=json_util.dumps(result), status=200, mimetype='application/json')
@mod_main.route('/getusersessionidresults', methods=['GET', "POST"])
def get_user_session_id_results():
if request.method == 'POST':
if session.get('user_id') is not None:
user_id=session['user_id']
else:
user_id=""
data = request.form.to_dict()
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
docs = mongo_utils.find_all(project['year'])
data['project_slug']=project['year']
data['user_id'] = user_id
result=mongo_utils.find_user_session_answers(data)
#return render_template('mod_main/user_candidate_results.html.html', docs=json.loads(json_util.dumps(docs)), questions=json.loads(json_util.dumps(questions)), count_questions=count_questions)
return Response(response=json_util.dumps(result), status=200, mimetype='application/json')
@mod_main.route('/getallquestions', methods=['GET', "POST"])
def get_all_questions():
if request.method == 'GET':
array_questions=[]
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
groups=mongo_utils.find_all(project['year'])
for group in json.loads(json_util.dumps(groups)):
questions = mongo_utils.find_all_questions_ordered(group['generated_id'])
for question in questions:
array_questions.append(question)
return Response(response=json_util.dumps(array_questions), status=200, mimetype='application/json')
@mod_main.route('/getquestionsresults', methods=['GET', "POST"])
def get_questions_results():
if request.method == 'POST':
user_id=session['user_id']
data = request.form.to_dict()
question= data['question_name']
project_slug=""
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
project_slug=project['year']
questions = mongo_utils.find_all_questions(project['year'])
response = mongo_utils.find_all_questions_results(user_id,question,project['year'])
count_question=0
created_array = []
question_key=""
for questions in json.loads(json_util.dumps(response['all_question'])):
count_question = count_question + 1
question_key=count_question
answers = mongo_utils.find_all_answers_s(question_key,question,project_slug)
answers_users = mongo_utils.find_all_answers_users(question_key,question,user_id,project_slug)
for r in json.loads(json_util.dumps(answers)):
candidate_name_result = mongo_utils.get_candidate_name(r['candidate_slug'])
for c_name in json.loads(json_util.dumps(candidate_name_result)):
candidate_name=c_name['candidate_name']
if 'status_'+str(question_key) in r:
status=r['status_'+str(question_key)]
else:
status="/"
if 'vazno_'+str(question_key) in r:
vazno=r['vazno_'+str(question_key)]
else:
vazno="/"
if 'comment_' + str(question_key) in r:
comment = r['comment_' + str(question_key)]
else:
comment = "/"
created_array.append({'candidate_slug':candidate_name,'status':status,'vazno':vazno,'comment':comment})
for r_u in json.loads(json_util.dumps(answers_users)):
if 'status_' + str(question_key) in r_u:
status = r_u['status_' + str(question_key)]
else:
status = "/"
if 'vazno_' + str(question_key) in r_u:
vazno = r_u['vazno_' + str(question_key)]
else:
vazno = "/"
created_array.append({'candidate_slug': 'Vaš odgovor', 'status': status, 'vazno': vazno,'comment': "/"})
return Response(response=json_util.dumps(created_array), status=200, mimetype='application/json')
@mod_main.route('/getquestionsresultsshared', methods=['GET', "POST"])
def get_questions_results_shared():
if request.method == 'POST':
data = request.form.to_dict()
question= data['question_name']
user_id=data['user_id']
project_slug=""
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
project_slug=project['year']
questions = mongo_utils.find_all_questions(project['year'])
response = mongo_utils.find_all_questions_results(user_id,question,project['year'])
count_question=0
created_array = []
question_key=""
for questions in json.loads(json_util.dumps(response['all_question'])):
count_question = count_question + 1
question_key=count_question
answers = mongo_utils.find_all_answers_s(question_key,question,project_slug)
answers_users = mongo_utils.find_all_answers_users(question_key,question,user_id,project_slug)
for r in json.loads(json_util.dumps(answers)):
candidate_name_result = mongo_utils.get_candidate_name(r['candidate_slug'])
for c_name in json.loads(json_util.dumps(candidate_name_result)):
candidate_name=c_name['candidate_name']
if 'status_'+str(question_key) in r:
status=r['status_'+str(question_key)]
else:
status="/"
if 'vazno_'+str(question_key) in r:
vazno=r['vazno_'+str(question_key)]
else:
vazno="/"
if 'comment_' + str(question_key) in r:
comment = r['comment_' + str(question_key)]
else:
comment = "/"
created_array.append({'candidate_slug':candidate_name,'status':status,'vazno':vazno,'comment':comment})
for r_u in json.loads(json_util.dumps(answers_users)):
if 'status_' + str(question_key) in r_u:
status = r_u['status_' + str(question_key)]
else:
status = "/"
if 'vazno_' + str(question_key) in r_u:
vazno = r_u['vazno_' + str(question_key)]
else:
vazno = "/"
created_array.append({'candidate_slug': 'Moj odgovor', 'status': status, 'vazno': vazno,'comment': "/"})
return Response(response=json_util.dumps(created_array), status=200, mimetype='application/json')
@mod_main.route('/getallqu', methods=['GET', "POST"])
def get_all_q_a_u():
if request.method == 'GET':
if session.get('user_id') is not None:
user_id = session['user_id']
count_question=0
create_question_array=[]
project_slug=""
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
response_all_questions=mongo_utils.find_all_questions(project['year'])
project_slug=project['year']
for raq in json.loads(json_util.dumps(response_all_questions)):
count_question=count_question+1
question_key=count_question
response_user_q = mongo_utils.find_all_questions_user(user_id,project_slug)
for ruq in json.loads(json_util.dumps(response_user_q)):
if 'vazno_'+str(count_question) in ruq and 'status_'+str(count_question) in ruq:
create_question_array.append({'question_name':ruq['question_'+str(count_question)]})
return Response(response=json_util.dumps(create_question_array), status=200, mimetype='application/json')
@mod_main.route('/getallquresults', methods=['GET', "POST"])
def get_all_q_a_u_result():
if request.method == 'POST':
data = request.form.to_dict()
user_id=data['user_id']
count_question=0
create_question_array=[]
project_slug=""
project_enabled = mongo_utils.get_enabled_project()
for project in json.loads(json_util.dumps(project_enabled)):
response_all_questions=mongo_utils.find_all_questions(project['year'])
project_slug=project['year']
for raq in json.loads(json_util.dumps(response_all_questions)):
count_question=count_question+1
question_key=count_question
response_user_q = mongo_utils.find_all_questions_user(user_id,project_slug)
for ruq in json.loads(json_util.dumps(response_user_q)):
if 'vazno_'+str(count_question) in ruq and 'status_'+str(count_question) in ruq:
create_question_array.append({'question_name':ruq['question_'+str(count_question)]})
return Response(response=json_util.dumps(create_question_array), status=200, mimetype='application/json')
@mod_main.route('/getanswersusercandidate', methods=['GET', "POST"])
def get_answers_user_candidate():
if request.method=="POST":
if session.get('user_id') is not None:
user_id = session['user_id']
data = request.form.to_dict()
created_array=[]
count_question = 0
        question_key = 0
response_users_questtion = mongo_utils.find_users_question_a(user_id)
response_all_questions = mongo_utils.find_all_questions()
for raq in json.loads(json_util.dumps(response_all_questions)):
count_question = count_question + 1
question_key=count_question
response_canidates_questtion = mongo_utils.find_candidates_question_a(question_key,data['question_name'])
answers_users = mongo_utils.find_all_answers_users(question_key, data['question_name'], user_id)
for r_candidates in json.loads(json_util.dumps(response_canidates_questtion)):
candidate_name_result = mongo_utils.get_candidate_name(r_candidates['candidate_slug'])
for c_name in json.loads(json_util.dumps(candidate_name_result)):
candidate_name = c_name['candidate_name']
if 'status_' + str(question_key) in r_candidates:
status = r_candidates['status_' + str(question_key)]
else:
status = "/"
if 'vazno_' + str(question_key) in r_candidates:
vazno = r_candidates['vazno_' + str(question_key)]
else:
vazno = "/"
if 'comment_' + str(question_key) in r_candidates:
comment = r_candidates['comment_' + str(question_key)]
else:
comment = "/"
created_array.append({'candidate_slug':candidate_name,'vazno':vazno,'status':status,'comment':comment})
for r_users in json.loads(json_util.dumps(answers_users)):
if 'status_' + str(question_key) in r_users:
status = r_users['status_' + str(question_key)]
else:
status = "/"
if 'vazno_' + str(question_key) in r_users:
vazno = r_users['vazno_' + str(question_key)]
else:
vazno = "/"
created_array.append({'candidate_slug':"Vaš odgovor",'vazno':vazno,'status':status,'comment':"/"})
return Response(response=json_util.dumps(created_array), status=200, mimetype='application/json')
|
crtarsorg/glasomer.rs-v2
|
app/mod_main/views.py
|
Python
|
cc0-1.0
| 17,233
|
print("In Python, what do you call a 'box' used to store data?")
answer = input()
if answer == "variable":
print(" :) ")
else:
print(" :( ")
print("Thank you for playing!")
print('''
Q1 - In Python, what do you call a 'box' used to store data?
a - text
b - variable
c - a shoe box
''')
answer = input().lower()
if answer == "a":
print(" Nope - text is a type of data :( ")
elif answer == "b":
print(" Correct!! :) ")
elif answer == "c":
print(" Don't be silly! :( ")
else:
print(" You didn't choose a, b or c :( ")
|
arve0/example_lessons
|
src/python/lessons/Quiz/Project Resources/Quiz.py
|
Python
|
cc0-1.0
| 553
|
from scapy.all import *
import plotly
from datetime import datetime
import pandas as pd
#Read the packets from file
packets=rdpcap('mypcap.pcap')
#lists to hold packetinfo
pktBytes=[]
pktTimes=[]
#Read each packet and append to the lists
for pkt in packets:
if IP in pkt:
try:
pktBytes.append(pkt[IP].len)
pktTime=datetime.fromtimestamp(pkt.time)
pktTimes.append(pktTime.strftime("%Y-%m-%d %H:%M:%S.%f"))
        except Exception:
pass
# convert list to series
bytes=pd.Series(pktBytes).astype(int)
times=pd.to_datetime(pd.Series(pktTimes).astype(str), errors='coerce')
#Create the dateframe
df=pd.DataFrame({"Bytes": bytes, "Times":times})
#Set the date to a timestamp
df=df.set_index('Times')
df2=df.resample('2S').sum()
print(df2)
#Create the graph
plotly.offline.plot({
"data":[plotly.graph_objs.Scatter(x=df2.index, y=df2['Bytes'])],
"layout":plotly.graph_objs.Layout(title="Bytes over Time ",
xaxis=dict(title="Time"),
yaxis=dict(title="Bytes"))})
|
erikjjpost/scripts
|
PcapTimeline.py
|
Python
|
cc0-1.0
| 1,022
|
import zmq
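# Demo: a single PUSH socket fans messages out round-robin to ten PULL
# clients over inproc; a Poller then drains whichever sockets are readable.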
ctx = zmq.Context.instance()
server = ctx.socket(zmq.PUSH)
server.bind('inproc://foo')
clients = [ctx.socket(zmq.PULL) for i in range(10)]
poller = zmq.Poller()
for client in clients:
client.connect('inproc://foo')
poller.register(client, zmq.POLLIN)
for client in clients:
server.send(b'DATA')
for sock, flags in poller.poll(0):
print(sock, repr(sock.recv()))
|
waveform80/presentations
|
concurrency/demo3.py
|
Python
|
cc0-1.0
| 390
|
#!/usr/bin/env python
"""
NPR 2017-01-22
www.npr.org/2017/01/22/511046359/youve-got-to-comb-together-to-solve-this-one
The numbers 5,000, 8,000, and 9,000 share a property that only five integers altogether have.
Identify the property and the two other integers that have it.
"""
# The property is that they are supervocalic (one each of aeiou).
# This code will simply try to find the other such numbers.
def is_supervocalic(w):
'''
Determine if a word has one each of a, e, i, o, u
We also want it not to have a 'y'
'''
vowels = 'aeiou'
for vowel in vowels:
if w.lower().count(vowel) != 1:
return False
if 'y' in w.lower():
return False
return True
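# Examples: is_supervocalic("sequoia") -> True (each vowel exactly once);
# is_supervocalic("banana") -> False (three a's, no e/i/o/u).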
# Thanks to http://stackoverflow.com/a/19193721
def numToWords(num,join=True):
    '''convert an integer number into words'''
units = ['','one','two','three','four','five','six','seven','eight','nine']
teens = ['','eleven','twelve','thirteen','fourteen','fifteen','sixteen', \
'seventeen','eighteen','nineteen']
tens = ['','ten','twenty','thirty','forty','fifty','sixty','seventy', \
'eighty','ninety']
thousands = ['','thousand','million','billion','trillion','quadrillion', \
'quintillion','sextillion','septillion','octillion', \
'nonillion','decillion','undecillion','duodecillion', \
'tredecillion','quattuordecillion','sexdecillion', \
'septendecillion','octodecillion','novemdecillion', \
'vigintillion']
words = []
if num==0: words.append('zero')
else:
numStr = '%d'%num
numStrLen = len(numStr)
groups = (numStrLen+2)/3
numStr = numStr.zfill(groups*3)
for i in range(0,groups*3,3):
h,t,u = int(numStr[i]),int(numStr[i+1]),int(numStr[i+2])
g = groups-(i/3+1)
if h>=1:
words.append(units[h])
words.append('hundred')
if t>1:
words.append(tens[t])
if u>=1: words.append(units[u])
elif t==1:
if u>=1: words.append(teens[u])
else: words.append(tens[t])
else:
if u>=1: words.append(units[u])
if (g>=1) and ((h+t+u)>0): words.append(thousands[g])
if join: return ' '.join(words)
return words
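# e.g. numToWords(9000) -> 'nine thousand'; numToWords(21) -> 'twenty one'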
# Note that every integer greater than 100,000 has a repeated vowel
for i in range(100000):
word = numToWords(i)
if is_supervocalic(word):
print i, word
|
boisvert42/npr-puzzle-python
|
2017/0122_unusual_numbers.py
|
Python
|
cc0-1.0
| 2,574
|
from datetime import datetime
import threading
import time
from collections import deque
import logging
logger = logging.getLogger(__name__)
class MonitorThread(threading.Thread):
"""executes a function f with sleep_time intervals in between
"""
def __init__(self, sleep_time, callback):
threading.Thread.__init__(self)
self.sleep_time = sleep_time
self.callback = callback
self.stop_signal = False
def run(self):
start_time = datetime.utcnow()
while True:
self.callback()
d = datetime.utcnow() - start_time
seconds = (d.microseconds + (d.seconds + d.days * 24 * 3600) * \
(10 ** 6)) / float(10 ** 6)
duration = seconds % self.sleep_time # method execution time
sleep_time = max(0, self.sleep_time - duration)
if self.stop_signal:
break
time.sleep(sleep_time)
def stop(self):
self.stop_signal = True
self.join()
return
@staticmethod
def new_thread(sleep_time, callback):
t = MonitorThread(sleep_time, callback)
t.setDaemon(True)
t.start()
return t
class Monitor(object):
"""Calls a function periodically, saves its output on a list,
together with the call datetime.
Optionally flushes results to disk
"""
def __init__(self, f, sleep_time=10, callback=None):
"""
f: the function whose output to monitor
memory: capacity of Monitor instance, in number of data entries
sleep_time: time between calls to f, in seconds
callback: function to call after every call to f
"""
self.f = f
self.sleep_time = sleep_time
self.external_callback = callback
self.reset_data()
def reset_data(self):
logger.debug("data reset")
self.data = deque()
def add_entry(self, data):
logger.debug("entry added")
d = (datetime.utcnow(), data)
self.data.append(d)
def callback(self):
logger.debug("callback")
if self.external_callback:
self.external_callback(self)
d = self.f()
self.add_entry(d)
def start(self):
logger.debug("starting")
self.thread = MonitorThread.new_thread(self.sleep_time, self.callback)
def stop(self):
logger.debug("stopping")
assert self.thread
self.thread.stop()
def limit_memory_callback(n_cells, monitor):
    '''make a partial of this function with the desired n_cells and set
    it as a callback of Monitor to limit memory capacity'''
    logger.debug("limit_memory_callback called")
    extra = len(monitor.data) - n_cells
    if extra > 0:
        # monitor.data is a deque, which does not support slicing;
        # drop the oldest entries instead
        for _ in range(extra):
            monitor.data.popleft()
def each_interval_callback( other_callback, interval_name, monitor ):
'''make a partial of this function with a function and a
interval_name (that is a property of datetime) and set
it (the partial) as a callback of Monitor to have it called once
each interval_name'''
logger.debug("each_interval_callback called")
if len(monitor.data)>1:
a=getattr(monitor.data[-2][0], interval_name)
b=getattr(monitor.data[-1][0], interval_name)
if a!=b:
other_callback( monitor )
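# Hypothetical usage sketch (monitored function and values are illustrative):
# import functools, random
# m = Monitor(random.random, sleep_time=1,
#             callback=functools.partial(limit_memory_callback, 100))
# m.start()
# ... let it collect timestamped entries in m.data ...
# m.stop()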
|
goncalopp/mexbtcapi
|
mexbtcapi/util/deprecated/monitor.py
|
Python
|
cc0-1.0
| 3,325
|
#! /usr/bin/env python
"""
This script produces quality plots to check that the LFs are fine compared to simumlations.
"""
import sys
import os
from os.path import join
data_dir = os.environ['DATA_DIR']
import glob
from lib_plot import *
#from lineListAir import *
SNlim = 5
# "D:\data\LF-O\LFmodels\data\trends_color_mag\O2_3728-VVDSDEEPI24-z0.947.txt"
plotDir="/home/comparat/database/Simulations/galform-lightcone/products/emissionLineLuminosityFunctions/plots/"
dir="/home/comparat/database/Simulations/galform-lightcone/products/emissionLineLuminosityFunctions/O2_3728/"
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimR-24.2-z0.7*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimR-*z0.7*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "R<23.0","R<23.5", "R<24.2"])
for ii,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Rmag-z0.7.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimR-24.2-z0.9*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimR-*z0.9*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "R<23.0","R<23.5", "R<24.2"])
for ii,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Rmag-z0.9.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimR-24.2-z1.*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimR-*z1.*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "R<23.0","R<23.5", "R<24.2"])
for ii,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Rmag-z1.2.pdf"))
p.clf()
########################################33
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimI-24-z1.*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimI-*z1.*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "I<22.5", "I<23.0","I<23.5","I<24.0"])
for jj,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[jj] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Imag-z1.2.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimI-24-z0.9*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimI-*z0.9*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "I<22.5", "I<23.0","I<23.5","I<24.0"])
for jj,el in enumerate(lf_measurement_files) :
data= n.loadtxt( el, unpack=True)
phiRatio[jj] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Imag-z0.9.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*MagLimI-24-z0.7*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*MagLimI-*z0.7*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ len(lf_measurement_files), len(dataRef[0]) ])
label = n.array([ "I<22.5", "I<23.0","I<23.5","I<24.0"])
for jj,el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[jj] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((1e40,1e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=2)
p.savefig(join(plotDir,"trends_O2_3728_Imag-z0.75.pdf"))
p.clf()
#####################################3
#####################################3
# R-Z
#####################################3
#####################################3
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSrz_gt_0.0-z0.7*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSrz_?t_*z0.7*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
label = n.array(["r-z>0", "r-z>0.5", "r-z>1", "r-z>1.5", "r-z<1", "r-z<1.5", "r-z<2"])
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_RZ-z0.75.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSrz_gt_0.0-z0.9*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSrz_?t_*z0.9*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_RZ-z0.9.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSrz_gt_0.0-z1.*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSrz_?t_*z1.*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_RZ-z1.2.pdf"))
p.clf()
#####################################3
#####################################3
# G-R
#####################################3
#####################################3
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSgr_gt_0.0-z0.7*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSgr_?t_*z0.7*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
label = n.array(["g-r>0", "g-r>0.5", "g-r>1", "g-r>1.5", "g-r<1", "g-r<1.5", "g-r<2"])
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_GR-z0.75.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSgr_gt_0.0-z0.9*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSgr_?t_*z0.9*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
label = n.array(["g-r>0", "g-r>0.5", "g-r>1", "g-r>1.5", "g-r<1", "g-r<1.5", "g-r<2"])
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_GR-z0.9.pdf"))
p.clf()
lf_measurement_files_ref=n.array(glob.glob(dir+"*VVDSgr_gt_0.0-z1.*.txt"))
lf_measurement_files=n.array(glob.glob(dir+"*VVDSgr_?t_*z1.*.txt"))
lf_measurement_files.sort()
dataRef = n.loadtxt( lf_measurement_files_ref[0], unpack=True)
label = n.array(["g-r>0", "g-r>0.5", "g-r>1", "g-r>1.5", "g-r<1", "g-r<1.5", "g-r<2"])
phiRatio = n.empty([ 7, len(dataRef[0]) ])
for ii, el in enumerate(lf_measurement_files):
data= n.loadtxt( el, unpack=True)
phiRatio[ii] = data[3] / dataRef[3]
imin = n.argmax(dataRef[6])-1
p.figure(0,(6,6))
for jj in range(len(label)):
p.plot(dataRef[2][imin:],phiRatio[jj][imin:],label=label[jj])
p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
p.ylabel(r'$\Phi/\Phi_{ref}$')
p.xscale('log')
p.xlim((7e40,5e43))
p.ylim((-0.05,1.05))
p.grid()
p.legend(loc=4)
p.savefig(join(plotDir,"trends_O2_3728_I22.5_GR-z1.2.pdf"))
p.clf()
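# Hypothetical refactor sketch: every block above repeats the same
# load/ratio/plot sequence, so it could be collapsed into one helper
# (n, p, glob, join, dir and plotDir are the names already used in this
# script; lib_plot is assumed to provide n=numpy and p=pylab).
def plot_ratio_trend(ref_pattern, files_pattern, labels, out_name,
                     xlim=(1e40, 1e43), legend_loc=2):
    ref_files = n.array(glob.glob(dir + ref_pattern))
    files = n.array(glob.glob(dir + files_pattern))
    files.sort()
    dataRef = n.loadtxt(ref_files[0], unpack=True)
    phiRatio = n.empty([len(files), len(dataRef[0])])
    for ii, el in enumerate(files):
        data = n.loadtxt(el, unpack=True)
        phiRatio[ii] = data[3] / dataRef[3]
    imin = n.argmax(dataRef[6]) - 1
    p.figure(0, (6, 6))
    for jj in range(len(labels)):
        p.plot(dataRef[2][imin:], phiRatio[jj][imin:], label=labels[jj])
    p.xlabel(r'$log_{10}(L[O_{II}])$ [erg s$^{-1}$]')
    p.ylabel(r'$\Phi/\Phi_{ref}$')
    p.xscale('log')
    p.xlim(xlim)
    p.ylim((-0.05, 1.05))
    p.grid()
    p.legend(loc=legend_loc)
    p.savefig(join(plotDir, out_name))
    p.clf()
# e.g. the first block above would become:
# plot_ratio_trend("*MagLimR-24.2-z0.7*.txt", "*MagLimR-*z0.7*.txt",
#                  ["R<23.0", "R<23.5", "R<24.2"],
#                  "trends_O2_3728_Rmag-z0.7.pdf")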
|
JohanComparat/nbody-npt-functions
|
bin/bin_galform/plotLFs-color-mag-trends.py
|
Python
|
cc0-1.0
| 11,045
|
import ast
from python_minifier.rename.binding import NameBinding
from python_minifier.rename.name_generator import name_filter
from python_minifier.rename.util import is_namespace
def all_bindings(node):
"""
All bindings in a module
:param node: The module to get bindings in
:type node: :class:`ast.AST`
:rtype: Iterable[ast.AST, Binding]
"""
if is_namespace(node):
for binding in node.bindings:
yield node, binding
for child in ast.iter_child_nodes(node):
for namespace, binding in all_bindings(child):
yield namespace, binding
def sorted_bindings(module):
"""
    All bindings in a module, sorted by descending number of references
:param module: The module to get bindings in
:type module: :class:`ast.AST`
:rtype: Iterable[ast.AST, Binding]
"""
def comp(tup):
namespace, binding = tup
return len(binding.references)
return sorted(all_bindings(module), key=comp, reverse=True)
def reservation_scope(namespace, binding):
"""
    Get the namespaces that are in the binding's reservation scope
Returns the namespace nodes the binding name must be resolvable in
:param namespace: The local namespace of a binding
:type namespace: :class:`ast.AST`
:param binding: The binding to get the reservation scope for
:type binding: Binding
:rtype: set[ast.AST]
"""
namespaces = set([namespace])
for node in binding.references:
while node is not namespace:
namespaces.add(node.namespace)
node = node.namespace
return namespaces
def add_assigned(node):
"""
Add the assigned_names attribute to namespace nodes in a module
:param node: The module to add the assigned_names attribute to
:type node: :class:`ast.Module`
"""
if is_namespace(node):
node.assigned_names = set()
for child in ast.iter_child_nodes(node):
add_assigned(child)
def reserve_name(name, reservation_scope):
"""
Reserve a name in a reservation scope
:param str name: The name to reserve
:param reservation_scope:
:type reservation_scope: Iterable[:class:`ast.AST`]
"""
for namespace in reservation_scope:
namespace.assigned_names.add(name)
class UniqueNameAssigner(object):
"""
Assign new names to renamed bindings
Assigns a unique name to every binding
"""
def __init__(self):
self.name_generator = name_filter()
self.names = []
def available_name(self):
return next(self.name_generator)
def __call__(self, module):
assert isinstance(module, ast.Module)
for namespace, binding in sorted_bindings(module):
if binding.allow_rename:
binding.new_name = self.available_name()
return module
class NameAssigner(object):
"""
Assign new names to renamed bindings
This assigner creates a name 'reservation scope' containing each namespace a binding is referenced in, including
transitive namespaces. Bindings are then assigned the first available name that has no references in their
reservation scope. This means names will be reused in sibling namespaces, and shadowed where possible in child
namespaces.
Bindings are assigned names in order of most references, with names assigned shortest first.
"""
def __init__(self, name_generator=None):
self.name_generator = name_generator if name_generator is not None else name_filter()
self.names = []
def iter_names(self):
for name in self.names:
yield name
while True:
name = next(self.name_generator)
self.names.append(name)
yield name
def available_name(self, reservation_scope, prefix=''):
"""
Search for the first name that is not in reservation scope
"""
for name in self.iter_names():
if self.is_available(prefix + name, reservation_scope):
return prefix + name
def is_available(self, name, reservation_scope):
"""
Is a name unreserved in a reservation scope
:param str name: the name to check availability of
:param reservation_scope: The scope to check
:type reservation_scope: Iterable[:class:`ast.AST`]
:rtype: bool
"""
for namespace in reservation_scope:
if name in namespace.assigned_names:
return False
return True
def __call__(self, module, prefix_globals, reserved_globals=None):
assert isinstance(module, ast.Module)
add_assigned(module)
for namespace, binding in all_bindings(module):
if binding.reserved is not None:
scope = reservation_scope(namespace, binding)
reserve_name(binding.reserved, scope)
if reserved_globals is not None:
for name in reserved_globals:
module.assigned_names.add(name)
for namespace, binding in sorted_bindings(module):
scope = reservation_scope(namespace, binding)
if binding.allow_rename:
if isinstance(namespace, ast.Module) and prefix_globals:
name = self.available_name(scope, prefix='_')
else:
name = self.available_name(scope)
def should_rename():
if binding.should_rename(name):
return True
# It's no longer efficient to do this rename
if isinstance(binding, NameBinding):
# Check that the original name is still available
if binding.reserved == binding.name:
# We already reserved it (this is probably an arg)
return False
if not self.is_available(binding.name, scope):
# The original name has already been assigned to another binding,
# so we need to rename this anyway.
return True
return False
if should_rename():
binding.rename(name)
else:
# Any existing name will become reserved
binding.disallow_rename()
if binding.name is not None:
reserve_name(binding.name, scope)
return module
def rename(module, prefix_globals=False, preserved_globals=None):
NameAssigner()(module, prefix_globals, preserved_globals)
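# Minimal usage sketch (illustrative only, not part of this module). It
# assumes the surrounding python_minifier package has already run its
# binding-resolution pass so that namespaces and bindings are attached to
# the tree; 'resolve_bindings' and 'unparse' below are hypothetical names
# standing in for that machinery.
#
#   import ast
#   tree = ast.parse(source)
#   resolve_bindings(tree)                    # hypothetical pre-pass
#   rename(tree, prefix_globals=False, preserved_globals=['main'])
#   minified = unparse(tree)                  # hypothetical unparser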
|
listyque/TACTIC-Handler
|
thlib/side/python_minifier/rename/renamer.py
|
Python
|
epl-1.0
| 6,672
|
#VERSION: 2.14
#AUTHORS: Diego de las Heras (diegodelasheras@gmail.com)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from urllib import urlencode
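# NOTE: the module paths above (HTMLParser, urllib.urlencode) are Python 2
# only; under Python 3 they live at html.parser and urllib.parse.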
class torrentz(object):
# mandatory properties
url = 'https://torrentz.eu'
name = 'Torrentz'
supported_categories = {'all': ''}
trackers_list = ['udp://open.demonii.com:1337/announce',
'udp://tracker.leechers-paradise.org:6969',
'udp://exodus.desync.com:6969',
'udp://tracker.coppersurfer.tk:6969',
'udp://9.rarbg.com:2710/announce']
class MyHtmlParser(HTMLParser):
def __init__(self, results, url, trackers):
HTMLParser.__init__(self)
self.results = results
self.url = url
self.trackers = trackers
self.td_counter = None
self.current_item = None
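            # Parser state: td_counter tracks which <span> cell of the current
            # result row we are in (0 = name, 4 = size, 5 = seeds, 6 = leeches);
            # None means we are between rows.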
def handle_starttag(self, tag, attrs):
if tag == 'a':
params = dict(attrs)
if 'href' in params:
self.current_item = {}
self.td_counter = 0
self.current_item['link'] = 'magnet:?xt=urn:btih:' + \
params['href'].strip(' /') + self.trackers
self.current_item['desc_link'] = self.url + params['href'].strip()
elif tag == 'span':
            if isinstance(self.td_counter, int):
self.td_counter += 1
if self.td_counter > 6: # safety
self.td_counter = None
def handle_data(self, data):
if self.td_counter == 0:
if 'name' not in self.current_item:
self.current_item['name'] = ''
self.current_item['name'] += data
elif self.td_counter == 4:
if 'size' not in self.current_item:
self.current_item['size'] = data.strip()
elif self.td_counter == 5:
if 'seeds' not in self.current_item:
self.current_item['seeds'] = data.strip().replace(',', '')
elif self.td_counter == 6:
if 'leech' not in self.current_item:
self.current_item['leech'] = data.strip().replace(',', '')
# display item
self.td_counter = None
self.current_item['engine_url'] = self.url
                # Torrentz appends a category suffix after a ' \xc2\xbb' separator;
                # keep only the title. str.find() returns -1 when absent (which is
                # truthy), so compare explicitly.
                if self.current_item['name'].find(' \xc2') != -1:
                    self.current_item['name'] = self.current_item['name'].split(' \xc2')[0]
                self.current_item['link'] += '&' + urlencode({'dn': self.current_item['name']})
if not self.current_item['seeds'].isdigit():
self.current_item['seeds'] = 0
if not self.current_item['leech'].isdigit():
self.current_item['leech'] = 0
prettyPrinter(self.current_item)
self.results.append('a')
def download_torrent(self, info):
print(download_file(info))
def search(self, what, cat='all'):
# initialize trackers for magnet links
        trackers = '&' + '&'.join(urlencode({'tr': tracker}) for tracker in self.trackers_list)
        # Fetch up to 6 result pages, stopping at the first page with no results.
        i = 0
        while i < 6:
results_list = []
# "what" is already urlencoded
html = retrieve_url('%s/any?f=%s&p=%d' % (self.url, what, i))
parser = self.MyHtmlParser(results_list, self.url, trackers)
parser.feed(html)
parser.close()
if len(results_list) < 1:
break
i += 1
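# Illustrative sketch (comments only; qBittorrent's nova engine loader never
# runs this): how a magnet link is assembled from the pieces above.
# 'example_hash' and 'Example Name' are made-up values.
#
#   trackers = '&' + '&'.join(urlencode({'tr': t}) for t in torrentz.trackers_list)
#   magnet = ('magnet:?xt=urn:btih:' + 'example_hash' + trackers
#             + '&' + urlencode({'dn': 'Example Name'}))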
|
ppolewicz/qBittorrent
|
src/searchengine/nova/engines/torrentz.py
|
Python
|
gpl-2.0
| 5,199
|