max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
conan/test_package/test.py | SaeidSamadi/Tasks | 60 | 12763551 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2019 CNRS-UM LIRMM, CNRS-AIST JRL
#
from __future__ import print_function
import math
import eigen
import sva
import rbdyn
import tasks
import sch
def makeZXZArm(isFixed = True, X_base = sva.PTransformd.Identity()):
    """Build a simple 3-joint Z-X-Z revolute arm of four unit-inertia links.

    Returns the (MultiBody, MultiBodyConfig) pair with the configuration
    zeroed out.
    """
    mbg = rbdyn.MultiBodyGraph()
    # Every link shares the same unit rigid-body inertia.
    rbi = sva.RBInertiad(1.0, eigen.Vector3d.Zero(), eigen.Matrix3d.Identity())
    for body_name in ("b0", "b1", "b2", "b3"):
        mbg.addBody(rbdyn.Body(rbi, body_name))
    # Revolute joints about Z, X and Z respectively.
    for joint_name, axis in (("j0", eigen.Vector3d.UnitZ()),
                             ("j1", eigen.Vector3d.UnitX()),
                             ("j2", eigen.Vector3d.UnitZ())):
        mbg.addJoint(rbdyn.Joint(rbdyn.Joint.Rev, axis, True, joint_name))
    # Each link attaches 0.5 along Y of its parent; joints sit at the origin
    # of their successor body.
    parent_offset = sva.PTransformd(eigen.Vector3d(0, 0.5, 0))
    child_origin = sva.PTransformd(eigen.Vector3d(0, 0, 0))
    mbg.linkBodies("b0", sva.PTransformd.Identity(), "b1", child_origin, "j0")
    mbg.linkBodies("b1", parent_offset, "b2", child_origin, "j1")
    mbg.linkBodies("b2", parent_offset, "b3", child_origin, "j2")
    mb = mbg.makeMultiBody("b0", isFixed, X_base)
    mbc = rbdyn.MultiBodyConfig(mb)
    mbc.zero(mb)
    return mb, mbc
if __name__ == "__main__":
    # 10000 steps at 1 ms per step -> 10 simulated seconds.
    nrIter = 10000
    mb, mbcInit = makeZXZArm()
    # Populate body poses and velocities before handing the robot to the QP.
    rbdyn.forwardKinematics(mb, mbcInit)
    rbdyn.forwardVelocity(mb, mbcInit)
    mbs = rbdyn.MultiBodyVector([mb])
    mbcs = rbdyn.MultiBodyConfigVector([mbcInit])
    solver = tasks.qp.QPSolver()
    # No equality/inequality constraints: only the decision variables.
    solver.nrVars(mbs, [], [])
    solver.updateConstrSize()
    # Drive the end effector "b3" towards this target position.
    posD = eigen.Vector3d(0.707106, 0.707106, 0.0)
    posTask = tasks.qp.PositionTask(mbs, 0, "b3", posD)
    # Set-point wrapper: stiffness 10, weight 1.
    posTaskSp = tasks.qp.SetPointTask(mbs, 0, posTask, 10, 1)
    solver.addTask(posTaskSp)
    for i in range(nrIter):
        solver.solve(mbs, mbcs)
        # Integrate the QP result and refresh kinematics for the next step.
        rbdyn.eulerIntegration(mbs[0], mbcs[0], 0.001)
        rbdyn.forwardKinematics(mbs[0], mbcs[0])
        rbdyn.forwardVelocity(mbs[0], mbcs[0])
    # The task error norm should be close to zero after convergence.
    print("(Python) Final norm of position task: {}".format(posTask.eval().norm()))
| 2.03125 | 2 |
tests/classtime/brain/scheduling/test_schedule.py | rosshamish/classtime-implementation | 1 | 12763552 |
import unittest
from classtime.brain.scheduling import Schedule
class TestSchedule(unittest.TestCase): #pylint: disable=R0904
    """Unit tests for classtime.brain.scheduling.Schedule."""

    @classmethod
    def setup_class(cls):
        # No shared fixtures are needed; each test builds its own Schedule.
        pass

    @classmethod
    def teardown_class(cls):
        # Nothing to clean up.
        pass
    def test_section_add(self):
        """Adding a single section fills the expected timetable blocks."""
        def assert_section_add(section, numblocks_expected):
            """
            Check that adding a given section to a new Schedule
            - fills the expected number of timetable blocks
            """
            schedule = Schedule()
            schedule.add_section(section)
            numblocks = 0
            # Count every non-open slot across the whole week.
            for day in schedule.timetable:
                for block in day:
                    if block is not Schedule.OPEN:
                        numblocks += 1
            assert numblocks == numblocks_expected
        # Expected counts are consistent with 30-minute slots:
        # e.g. 08:00-08:50 occupies 2 slots per listed day.
        testcases = [
            {
                'day': 'TR',
                'startTime': '08:00 AM',
                'endTime': '08:50 AM',
                'expected': 4
            },
            {
                'day': 'MTWRF',
                'startTime': '08:00 AM',
                'endTime': '08:50 AM',
                'expected': 10
            },
            {
                'day': 'TR',
                'startTime': '08:00 AM',
                'endTime': '08:50 PM',
                'expected': 52
            },
            {
                'day': 'TR',
                'startTime': '08:00 AM',
                'endTime': '09:20 AM',
                'expected': 6
            },
            {
                'day': 'M',
                'startTime': '06:00 PM',
                'endTime': '08:50 PM',
                'expected': 6
            }
        ]
        for section in testcases:
            assert_section_add(section,
                               section.get('expected'))
    def test_busy_time_add(self):
        """Adding a busy time fills timetable blocks just like a section."""
        def assert_busy_time_add(busy_time, numblocks_expected):
            """
            Check that adding a given busy_time to a new Schedule
            - fills the expected number of timetable blocks
            """
            schedule = Schedule()
            schedule.add_busy_time(busy_time)
            numblocks = 0
            # Count every non-open slot across the whole week.
            for day in schedule.timetable:
                for block in day:
                    if block is not Schedule.OPEN:
                        numblocks += 1
            assert numblocks == numblocks_expected
        # Same data as test_section_add: busy times and sections are
        # expected to occupy the timetable identically.
        testcases = [
            {
                'day': 'TR',
                'startTime': '08:00 AM',
                'endTime': '08:50 AM',
                'expected': 4
            },
            {
                'day': 'MTWRF',
                'startTime': '08:00 AM',
                'endTime': '08:50 AM',
                'expected': 10
            },
            {
                'day': 'TR',
                'startTime': '08:00 AM',
                'endTime': '08:50 PM',
                'expected': 52
            },
            {
                'day': 'TR',
                'startTime': '08:00 AM',
                'endTime': '09:20 AM',
                'expected': 6
            },
            {
                'day': 'M',
                'startTime': '06:00 PM',
                'endTime': '08:50 PM',
                'expected': 6
            }
        ]
        for section in testcases:
            assert_busy_time_add(section,
                                 section.get('expected'))
    def test_busy_time_overlap(self):
        """
        See github.com/rosshamish/classtime/issues/96
        """
        def assert_busy_time_overlap_doesnt_double(busy_times, numblocks_expected):
            """
            Check that adding the same busy_time more than once to a new Schedule
            is idempotent
            """
            schedule = Schedule()
            bitmaps_set = set()
            for busy_time in busy_times:
                schedule.add_busy_time(busy_time)
                # Snapshot the weekly bitmaps after each add; idempotence
                # means every snapshot is identical (set collapses to one).
                bitmaps_set.add(''.join(bin(day_bitmap) for day_bitmap in schedule.timetable_bitmap))
            assert len(bitmaps_set) == 1
            numblocks = 0
            for day in schedule.timetable:
                for block in day:
                    if block is not Schedule.OPEN:
                        numblocks += 1
            assert numblocks == numblocks_expected
        # A duplicated 20-minute busy time must occupy exactly one block.
        testcases = [
            {
                'busy_times': [
                    {
                        'day': 'W',
                        'startTime': '08:00 AM',
                        'endTime': '08:20 AM',
                    },
                    {
                        'day': 'W',
                        'startTime': '08:00 AM',
                        'endTime': '08:20 AM',
                    }
                ],
                'expected': 1
            }
        ]
        for testcase in testcases:
            assert_busy_time_overlap_doesnt_double(testcase.get('busy_times'),
                                                   testcase.get('expected'))
def test_conflict_recognition(self): #pylint: disable=R0201
def assert_conflict_recognition(sections, has_conflict):
"""
Assert that a list of sections has either:
- one or more conflicts, or
- no conflicts
"""
schedule = Schedule()
for section in sections:
if schedule.conflicts(section):
assert has_conflict == True
return
else:
schedule.add_section(section)
assert has_conflict == False
testcases = [
{
'expected': True,
'sections':
[
{
'day': 'TR',
'startTime': '08:00 AM',
'endTime': '08:50 AM'
},
{
'day': 'MTWRF',
'startTime': '08:00 AM',
'endTime': '08:50 AM'
}
]
},
{
'expected': False,
'sections':
[
{
'day': 'TR',
'startTime': '08:00 AM',
'endTime': '08:50 AM'
},
{
'day': 'TR',
'startTime': '09:00 AM',
'endTime': '09:50 AM'
}
]
},
{
'expected': False,
'sections':
[
{
'day': 'TR',
'startTime': '07:00 PM',
'endTime': '07:50 PM'
},
{
'day': 'TR',
'startTime': '08:00 PM',
'endTime': '09:00 PM'
}
]
},
{
'expected': True,
'sections':
[
{
'day': 'TR',
'startTime': '07:00 PM',
'endTime': '07:50 PM'
},
{
'day': 'TR',
'startTime': '08:00 PM',
'endTime': '09:00 PM'
},
{
'day': 'TR',
'startTime': '08:30 PM',
'endTime': '08:50 PM'
}
]
}
]
for scenario in testcases:
assert_conflict_recognition(scenario.get('sections'),
scenario.get('expected'))
def test_comparison_called_but_no_courses(self):
sched = Schedule(sections=[], preferences={'no-marathons': 1})
sched.is_similar(Schedule())
def test_preferences_null_values():
sched = Schedule(preferences={ 'no-marathons': None })
sched.overall_score() # should not raise an exception
| 3.265625 | 3 |
nasdaqdatalink/model/data_list.py | edvn0/data-link-python | 1,178 | 12763553 | from .model_list import ModelList
from .data_mixin import DataMixin
class DataList(DataMixin, ModelList):
    """Model list with data-access behaviour mixed in.

    Pure composition class: all behaviour comes from DataMixin and
    ModelList; no additional members are defined here.
    """
    pass
| 1.226563 | 1 |
puput/wagtail_hooks.py | UCBerkeleySETI/puput | 554 | 12763554 | <gh_stars>100-1000
import wagtail.admin.rich_text.editors.draftail.features as draftail_features
from wagtail.admin.rich_text.converters.html_to_contentstate import InlineStyleElementHandler, BlockElementHandler
from wagtail.core import hooks
@hooks.register('register_rich_text_features')
def register_blockquote_feature(features):
    """Register the `blockquote` rich-text feature.

    Uses the `blockquote` Draft.js block type and stores the content as
    HTML wrapped in a `<blockquote>` tag.
    """
    feature_name = 'blockquote'
    type_ = 'blockquote'
    tag = 'blockquote'
    control = {
        'type': type_,
        'label': '❝',
        'description': 'Quote',
        'element': 'blockquote',
    }
    features.register_editor_plugin(
        'draftail', feature_name, draftail_features.BlockFeature(control)
    )
    db_conversion = {
        'from_database_format': {tag: BlockElementHandler(type_)},
        'to_database_format': {'block_map': {type_: tag}},
    }
    features.register_converter_rule('contentstate', feature_name, db_conversion)
    features.default_features.append(feature_name)
@hooks.register('register_rich_text_features')
def register_codeline_feature(features):
    """Register the inline `Code Line` rich-text feature.

    Uses the Draft.js `CODE` inline style and stores content as HTML
    wrapped in a `<code>` tag.
    """
    feature_name = 'Code Line'
    type_ = 'CODE'
    tag = 'code'
    control = {
        'type': type_,
        'label': '>_',
        'description': 'Code Line',
    }
    features.register_editor_plugin(
        'draftail', feature_name, draftail_features.InlineStyleFeature(control)
    )
    # Bidirectional mapping between the DB HTML and the editor contentstate.
    db_conversion = {
        'from_database_format': {tag: InlineStyleElementHandler(type_)},
        'to_database_format': {'style_map': {type_: tag}},
    }
    features.register_converter_rule('contentstate', feature_name, db_conversion)
    # Enable the feature by default in all rich-text editors.
    features.default_features.append(feature_name)
| 2.0625 | 2 |
tests/test_add.py | GoodRx/pytest-mark-no-py3 | 3 | 12763555 | from textwrap import dedent
import pytest
from pytest_mark_no_py3.add import main
@pytest.mark.parametrize("directory", [True, False])
def test_main_no_results_file(directory, runner, testdir):
    """Without a result log, every unmarked test function gets the mark.

    Parametrized to run the CLI against either the file itself or its
    containing directory.
    """
    pyfile = testdir.makepyfile(
        dedent(
            """\
            @pytest.mark.no_py3
            def test_already_marked():
                assert False
            def test_will_be_marked():
                assert False
            """
        )
    )
    # The already-marked test must not be double-marked.
    expected = dedent(
        """\
        @pytest.mark.no_py3
        def test_already_marked():
            assert False
        @pytest.mark.no_py3
        def test_will_be_marked():
            assert False"""
    )
    if directory:
        path = pyfile.dirname
    else:
        path = pyfile
    result = runner.invoke(main, ["--no-interactive", str(path)])
    assert result.exit_code == 0
    assert pyfile.read() == expected
def test_main_with_results_file(runner, testdir):
    """With a pytest result log, only failing tests receive the mark."""
    pyfile = testdir.makepyfile(
        dedent(
            """\
            import pytest
            @pytest.mark.no_py3
            def test_already_marked():
                assert False
            def test_will_be_marked():
                assert False
            def test_passes():
                assert True
            """
        )
    )
    # test_passes succeeded in the log, so it must stay unmarked.
    expected = dedent(
        """\
        import pytest
        @pytest.mark.no_py3
        def test_already_marked():
            assert False
        @pytest.mark.no_py3
        def test_will_be_marked():
            assert False
        def test_passes():
            assert True"""
    )
    result_log = testdir.tmpdir.join("results.txt")
    # Produce the result log that the CLI consumes.
    testdir.runpytest("--result-log=%s" % result_log)
    result = runner.invoke(
        main, ["--no-interactive", "--result-log=%s" % result_log, pyfile.basename]
    )
    assert result.exit_code == 0
    assert pyfile.read() == expected
| 2.265625 | 2 |
thirdparty/src/opencc/setup.py | kanskinson/librime | 0 | 12763556 | import os
import re
import subprocess
import sys
import warnings
import setuptools
import setuptools.command.build_ext
import wheel.bdist_wheel
# Repository layout: the CMake project lives next to this setup.py; the
# compiled extension and its data files are installed into
# python/opencc/clib, while CMake build artifacts go to build/python.
_this_dir = os.path.dirname(os.path.abspath(__file__))
_clib_dir = os.path.join(_this_dir, 'python', 'opencc', 'clib')
_build_dir = os.path.join(_this_dir, 'build', 'python')
_cmake_file = os.path.join(_this_dir, 'CMakeLists.txt')
try:
    # If the opencc package is already importable, the native library was
    # built previously and the build step can be skipped.
    sys.path.insert(0, os.path.join(_this_dir, 'python'))
    import opencc  # noqa
    _libopencc_built = True
except ImportError:
    _libopencc_built = False
def get_version_info(cmake_file=None):
    """Parse the OpenCC version from the project's CMakeLists.txt.

    Args:
        cmake_file: Optional path to the CMake file to scan; defaults to
            the top-level CMakeLists.txt next to this setup.py.

    Returns:
        Version string 'MAJOR.MINOR.REVISION'.  Components missing from
        the file keep the defaults '1', '0', '0'.
    """
    if cmake_file is None:
        cmake_file = _cmake_file
    version_info = ['1', '0', '0']
    version_pattern = re.compile(
        r'OPENCC_VERSION_(MAJOR|MINOR|REVISION) (\d+)')
    # Read as bytes and decode per line (keeps Python 2/3 compatibility,
    # matching the rest of this file).
    with open(cmake_file, 'rb') as f:
        for line in f:  # renamed from ambiguous `l` (E741)
            match = version_pattern.search(line.decode('utf-8'))
            if not match:
                continue
            if match.group(1) == 'MAJOR':
                version_info[0] = match.group(2)
            elif match.group(1) == 'MINOR':
                version_info[1] = match.group(2)
            elif match.group(1) == 'REVISION':
                version_info[2] = match.group(2)
    version = '.'.join(version_info)
    return version
def get_author_info(author_file=None):
    """Parse 'Name <email>' entries from the AUTHORS file.

    BUG FIX: the original referenced an undefined module global
    `_author_file`, raising NameError whenever this function ran; the path
    is now derived from `_this_dir` (or passed in explicitly).

    Args:
        author_file: Optional path to the authors file; defaults to
            AUTHORS next to this setup.py.

    Returns:
        (authors, emails) as comma-joined strings, or the project default
        when the file is missing or contains no parsable entries.
    """
    if author_file is None:
        author_file = os.path.join(_this_dir, 'AUTHORS')
    if not os.path.isfile(author_file):
        # NOTE(review): '<EMAIL>' looks like a redaction placeholder --
        # restore the real maintainer address.
        return 'BYVoid', '<EMAIL>'
    authors = []
    emails = []
    author_pattern = re.compile(r'(.+) <(.+)>')
    with open(author_file, 'rb') as f:
        for line in f:
            match = author_pattern.search(line.decode('utf-8'))
            if not match:
                continue
            authors.append(match.group(1))
            emails.append(match.group(2))
    if len(authors) == 0:
        return 'BYVoid', '<EMAIL>'
    return ', '.join(authors), ', '.join(emails)
def get_long_description(readme_file=None):
    """Return the project README text for the PyPI long description.

    BUG FIX: the original referenced an undefined module global
    `_readme_file`, raising NameError when called; the path is now derived
    from `_this_dir` (or passed in explicitly).

    Args:
        readme_file: Optional path to the README; defaults to README.md
            next to this setup.py.
    """
    if readme_file is None:
        readme_file = os.path.join(_this_dir, 'README.md')
    with open(readme_file, 'rb') as f:
        return f.read().decode('utf-8')
def build_libopencc():
    """Configure, build and install libopencc and its Python extension.

    No-op when the `opencc` package was already importable at module load
    time (see `_libopencc_built`).  Raises AssertionError when either the
    CMake configure or build step fails.
    """
    if _libopencc_built:
        return  # Skip building binary file
    print('building libopencc into %s' % _build_dir)
    is_windows = sys.platform == 'win32'
    # Make build directories.
    # FIX: use os.makedirs instead of shelling out to `md` / `mkdir -p`
    # via shell=True -- portable and avoids spawning a shell.
    for directory in (_build_dir, _clib_dir):
        if not os.path.isdir(directory):
            os.makedirs(directory)
    # Configure
    cmake_args = [
        '-DBUILD_DOCUMENTATION:BOOL=OFF',
        '-DBUILD_SHARED_LIBS:BOOL=OFF',
        '-DENABLE_GTEST:BOOL=OFF',
        '-DENABLE_BENCHMARK:BOOL=OFF',
        '-DBUILD_PYTHON:BOOL=ON',
        '-DCMAKE_BUILD_TYPE=Release',
        '-DCMAKE_INSTALL_PREFIX={}'.format(_clib_dir),
        '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}'.format(_clib_dir),
        '-DPYTHON_EXECUTABLE={}'.format(sys.executable),
    ]
    if is_windows:
        # MSVC places Release artifacts in a per-config subdirectory unless
        # told otherwise.
        cmake_args += \
            ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE={}'.format(_clib_dir)]
        if sys.maxsize > 2**32:
            # 64-bit interpreter -> request the x64 generator platform.
            cmake_args += ['-A', 'x64']
    cmd = ['cmake', '-B', _build_dir] + cmake_args
    errno = subprocess.call(cmd)
    assert errno == 0, 'Configure failed'
    # Build
    cmd = [
        'cmake', '--build', _build_dir,
        '--config', 'Release',
        '--target', 'install'
    ]
    errno = subprocess.call(cmd)
    assert errno == 0, 'Build failed'
    # Empty __init__.py file has to be created
    # to make opencc.clib a module
    with open('{}/__init__.py'.format(_clib_dir), 'w'):
        pass
class OpenCCExtension(setuptools.Extension, object):
    """Placeholder Extension that defers compilation to the CMake build.

    sources=[] keeps setuptools from compiling anything itself; the
    BuildExtCommand recognizes this type and runs build_libopencc instead.
    """
    def __init__(self, name, sourcedir=''):
        setuptools.Extension.__init__(self, name, sources=[])
        # Absolute path to the directory containing the extension sources.
        self.sourcedir = os.path.abspath(sourcedir)
class BuildExtCommand(setuptools.command.build_ext.build_ext, object):
    """build_ext that routes OpenCCExtension instances to the CMake build."""
    def build_extension(self, ext):
        if isinstance(ext, OpenCCExtension):
            # Our pseudo-extension: run the CMake configure/build/install.
            build_libopencc()
        else:
            # Any ordinary extension goes through the stock build_ext path.
            super(BuildExtCommand, self).build_extension(ext)
class BDistWheelCommand(wheel.bdist_wheel.bdist_wheel, object):
    """Custom bdsit_wheel command that will change
    default plat-name based on PEP 425 and PEP 513
    """
    @staticmethod
    def _determine_platform_tag():
        # Returns a distutils-style platform string (dashes, not
        # underscores); wheel normalizes it into the final wheel tag.
        if sys.platform == 'win32':
            if 'amd64' in sys.version.lower():
                return 'win-amd64'
            return sys.platform
        if sys.platform == 'darwin':
            _, _, _, _, machine = os.uname()
            # Pin the macOS deployment target used by the build.
            return 'macosx-10.9-{}'.format(machine)
        if os.name == 'posix':
            _, _, _, _, machine = os.uname()
            # Generic Linux: claim manylinux1 compatibility (PEP 513).
            return 'manylinux1-{}'.format(machine)
        warnings.warn(
            'Windows macos and linux are all not detected, '
            'Proper distribution name cannot be determined.')
        from distutils.util import get_platform
        return get_platform()

    def initialize_options(self):
        super(BDistWheelCommand, self).initialize_options()
        # Override the default platform tag with the detected one.
        self.plat_name = self._determine_platform_tag()
# Package metadata is derived from the repository itself (CMakeLists.txt
# for the version, AUTHORS for maintainers, README for the description).
packages = ['opencc', 'opencc.clib']
version_info = get_version_info()
author_info = get_author_info()

setuptools.setup(
    name='OpenCC',
    version=version_info,
    author=author_info[0],
    author_email=author_info[1],
    description=" Conversion between Traditional and Simplified Chinese",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    url="https://github.com/BYVoid/OpenCC",
    packages=packages,
    package_dir={'opencc': 'python/opencc'},
    # Ship the compiled extension and its dictionary/config data files.
    package_data={str('opencc'): [
        'clib/opencc_clib*',
        'clib/share/opencc/*',
    ]},
    # Pseudo-extension: triggers the CMake build via BuildExtCommand.
    ext_modules=[OpenCCExtension('opencc.clib.opencc_clib', 'python')],
    cmdclass={
        'build_ext': BuildExtCommand,
        'bdist_wheel': BDistWheelCommand
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Natural Language :: Chinese (Simplified)',
        'Natural Language :: Chinese (Traditional)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Localization',
        'Topic :: Text Processing :: Linguistic',
    ],
    license='Apache License 2.0',
    keywords=['opencc', 'convert', 'chinese']
)
| 1.914063 | 2 |
packaging/setup/ovirt_engine_setup/engine_common/database.py | UranusBlockStack/ovirt-engine | 0 | 12763557 | <filename>packaging/setup/ovirt_engine_setup/engine_common/database.py
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import datetime
import gettext
import os
import re
import socket
import tempfile
import psycopg2
from otopi import base, util
from ovirt_engine import util as outil
from ovirt_engine_setup import util as osetuputil
from ovirt_engine_setup import dialog
from ovirt_engine_setup.engine_common import constants as oengcommcons
# Short alias for the database-environment key constants used throughout.
DEK = oengcommcons.DBEnvKeysConst
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
def _ind_env(inst, keykey):
return inst.environment[inst._dbenvkeys[keykey]]
@util.export
class Statement(base.Base):
    """Helper around psycopg2 for executing statements against a database
    described by an otopi environment."""

    @property
    def environment(self):
        # The otopi environment dict supplying connection parameters.
        return self._environment
    def __init__(
        self,
        dbenvkeys,
        environment,
    ):
        """
        dbenvkeys -- mapping from DBEnvKeysConst constants to the concrete
                     environment key names used for this database.
        environment -- the otopi environment dict.

        Raises RuntimeError when any DEK.REQUIRED_KEYS entry is missing
        from dbenvkeys.
        """
        super(Statement, self).__init__()
        self._environment = environment
        if not set(DEK.REQUIRED_KEYS) <= set(dbenvkeys.keys()):
            raise RuntimeError(
                _('Missing required db env keys: {keys}').format(
                    keys=list(set(DEK.REQUIRED_KEYS) - set(dbenvkeys.keys())),
                )
            )
        self._dbenvkeys = dbenvkeys
def connect(
self,
host=None,
port=None,
secured=None,
securedHostValidation=None,
user=None,
password=<PASSWORD>,
database=None,
):
if host is None:
host = _ind_env(self, DEK.HOST)
if port is None:
port = _ind_env(self, DEK.PORT)
if secured is None:
secured = _ind_env(self, DEK.SECURED)
if securedHostValidation is None:
securedHostValidation = _ind_env(self, DEK.HOST_VALIDATION)
if user is None:
user = _ind_env(self, DEK.USER)
if password is None:
password = _ind_env(self, DEK.PASSWORD)
if database is None:
database = _ind_env(self, DEK.DATABASE)
sslmode = 'allow'
if secured:
if securedHostValidation:
sslmode = 'verify-full'
else:
sslmode = 'require'
#
# old psycopg2 does not know how to ignore
# uselss parameters
#
if not host:
connection = psycopg2.connect(
database=database,
)
else:
#
# port cast is required as old psycopg2
# does not support unicode strings for port.
# do not cast to int to avoid breaking usock.
#
connection = psycopg2.connect(
host=host,
port=str(port),
user=user,
password=password,
database=database,
sslmode=sslmode,
)
return connection
def execute(
self,
statement,
args=dict(),
host=None,
port=None,
secured=None,
securedHostValidation=None,
user=None,
password=<PASSWORD>,
database=None,
ownConnection=False,
transaction=True,
):
# autocommit member is available at >= 2.4.2
def __backup_autocommit(connection):
if hasattr(connection, 'autocommit'):
return connection.autocommit
else:
return connection.isolation_level
def __restore_autocommit(connection, v):
if hasattr(connection, 'autocommit'):
connection.autocommit = v
else:
connection.set_isolation_level(v)
def __set_autocommit(connection, autocommit):
if hasattr(connection, 'autocommit'):
connection.autocommit = autocommit
else:
connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
if autocommit
else
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
)
ret = []
old_autocommit = None
_connection = None
cursor = None
try:
self.logger.debug(
"Database: '%s', Statement: '%s', args: %s",
database,
statement,
args,
)
if not ownConnection:
connection = _ind_env(self, DEK.CONNECTION)
else:
self.logger.debug('Creating own connection')
_connection = connection = self.connect(
host=host,
port=port,
secured=secured,
securedHostValidation=securedHostValidation,
user=user,
password=password,
database=database,
)
if not transaction:
old_autocommit = __backup_autocommit(connection)
__set_autocommit(connection, True)
cursor = connection.cursor()
cursor.execute(
statement,
args,
)
if cursor.description is not None:
cols = [d[0] for d in cursor.description]
while True:
entry = cursor.fetchone()
if entry is None:
break
ret.append(dict(zip(cols, entry)))
except:
if _connection is not None:
_connection.rollback()
raise
else:
if _connection is not None:
_connection.commit()
finally:
if old_autocommit is not None and connection is not None:
__restore_autocommit(connection, old_autocommit)
if cursor is not None:
cursor.close()
if _connection is not None:
_connection.close()
self.logger.debug('Result: %s', ret)
return ret
@util.export
class OvirtUtils(base.Base):
    """Database maintenance helpers (backup/restore, schema wipe, config
    sanity checks) shared by ovirt-engine-setup plugins."""

    # Per-class cache: True when the local psql client is the old 8.x
    # series, whose pgpass file takes the password unescaped.
    _plainPassword = None

    @property
    def environment(self):
        return self._environment

    @property
    def command(self):
        # Command-detection facility of the owning otopi plugin.
        return self._plugin.command

    @property
    def dialog(self):
        # Interactive dialog facility of the owning otopi plugin.
        return self._plugin.dialog
    def __init__(
        self,
        plugin,
        dbenvkeys,
        environment=None,
    ):
        """
        plugin -- the owning otopi plugin (supplies command/dialog/execute).
        dbenvkeys -- mapping from DBEnvKeysConst constants to concrete
                     environment key names for this database.
        environment -- optional environment override; defaults to the
                       plugin's own environment.

        Raises RuntimeError when any DEK.REQUIRED_KEYS entry is missing
        from dbenvkeys.
        """
        super(OvirtUtils, self).__init__()
        self._plugin = plugin
        self._environment = (
            self._plugin.environment
            if environment is None
            else environment
        )
        if not set(DEK.REQUIRED_KEYS) <= set(dbenvkeys.keys()):
            raise RuntimeError(
                _('Missing required db env keys: {keys}').format(
                    keys=list(set(DEK.REQUIRED_KEYS) - set(dbenvkeys.keys())),
                )
            )
        self._dbenvkeys = dbenvkeys
def detectCommands(self):
self.command.detect('pg_dump')
self.command.detect('pg_restore')
self.command.detect('psql')
    def createPgPass(self):
        """Write a temporary pgpass file with the DB credentials and store
        its path in the environment under the PGPASSFILE key.

        The file is deleted at interpreter exit via atexit.
        """
        #
        # we need client side psql library
        # version as at least in rhel for 8.4
        # the password within pgpassfile is
        # not escaped.
        # the simplest way is to checkout psql
        # utility version.
        #
        if type(self)._plainPassword is None:
            rc, stdout, stderr = self._plugin.execute(
                args=(
                    self.command.get('psql'),
                    '-V',
                ),
            )
            # e.g. 'psql (PostgreSQL) 8.4.x' -> old, unescaped format.
            type(self)._plainPassword = ' 8.' in stdout[0]
        fd, pgpass = tempfile.mkstemp()
        atexit.register(os.unlink, pgpass)
        with os.fdopen(fd, 'w') as f:
            f.write(
                (
                    '# DB USER credentials.\n'
                    '{host}:{port}:{database}:{user}:{password}\n'
                ).format(
                    host=_ind_env(self, DEK.HOST),
                    port=_ind_env(self, DEK.PORT),
                    database=_ind_env(self, DEK.DATABASE),
                    user=_ind_env(self, DEK.USER),
                    # Newer psql expects ':' and '\' escaped in pgpass.
                    password=(
                        _ind_env(self, DEK.PASSWORD)
                        if type(self)._plainPassword
                        else outil.escape(
                            _ind_env(self, DEK.PASSWORD),
                            ':\\',
                        )
                    ),
                ),
            )
        self.environment[self._dbenvkeys[DEK.PGPASSFILE]] = pgpass
    def tryDatabaseConnect(self, environment=None):
        """Probe connectivity by running `select 1` on a fresh connection.

        Raises RuntimeError (wrapping psycopg2.OperationalError) when the
        database is unreachable.
        """
        if environment is None:
            environment = self.environment
        try:
            statement = Statement(
                environment=environment,
                dbenvkeys=self._dbenvkeys,
            )
            statement.execute(
                statement="""
                    select 1
                """,
                ownConnection=True,
                transaction=False,
            )
            self.logger.debug('Connection succeeded')
        except psycopg2.OperationalError as e:
            self.logger.debug('Connection failed', exc_info=True)
            raise RuntimeError(
                _('Cannot connect to database: {error}').format(
                    error=e,
                )
            )
def isNewDatabase(
self,
host=None,
port=None,
secured=None,
user=None,
password=<PASSWORD>,
database=None,
):
statement = Statement(
environment=self.environment,
dbenvkeys=self._dbenvkeys,
)
ret = statement.execute(
statement="""
select count(*) as count
from pg_catalog.pg_tables
where schemaname = 'public';
""",
args=dict(),
host=host,
port=port,
secured=secured,
user=user,
password=password,
database=database,
ownConnection=True,
transaction=False,
)
return ret[0]['count'] == 0
    def createLanguage(self, language):
        """Create the given procedural language if it is not yet installed.

        NOTE(review): `language` is interpolated directly into the SQL via
        str.format; callers only pass the literal 'plpgsql' -- do not
        expose this to untrusted input.
        """
        statement = Statement(
            environment=self.environment,
            dbenvkeys=self._dbenvkeys,
        )
        # Only attempt creation when pg_language has no matching row.
        if statement.execute(
            statement="""
                select count(*)
                from pg_language
                where lanname=%(language)s;
            """,
            args=dict(
                language=language,
            ),
            ownConnection=True,
            transaction=False,
        )[0]['count'] == 0:
            statement.execute(
                statement=(
                    """
                        create language {language};
                    """
                ).format(
                    language=language,
                ),
                args=dict(),
                ownConnection=True,
                transaction=False,
            )
    def clearDatabase(self):
        """Drop all user objects from the public schema.

        Order matters: stored procedures and composite types first (via a
        generated script), then views, sequences and finally tables -- all
        with CASCADE.  Requires plpgsql, which is created if missing.
        """
        self.createLanguage('plpgsql')
        statement = Statement(
            environment=self.environment,
            dbenvkeys=self._dbenvkeys,
        )
        # Install a helper that emits one 'drop function/type ... cascade;'
        # statement per object in the public schema.
        statement.execute(
            statement="""
                create or replace
                function
                    oesetup_generate_drop_all_syntax()
                    returns setof text
                AS $procedure$ begin
                return query
                    select
                        'drop function if exists ' ||
                        ns.nspname ||
                        '.' ||
                        proname ||
                        '(' ||
                        oidvectortypes(proargtypes) ||
                        ') cascade;'
                    from
                        pg_proc inner join pg_namespace ns on (
                            pg_proc.pronamespace=ns.oid
                        )
                    where
                        ns.nspname = 'public'
                    union
                    select
                        'drop type if exists ' ||
                        c.relname::information_schema.sql_identifier ||
                        ' ' ||
                        'cascade;'
                    from
                        pg_namespace n, pg_class c, pg_type t
                    where
                        n.oid = c.relnamespace and t.typrelid = c.oid and
                        c.relkind = 'c'::"char" and n.nspname = 'public';
                end; $procedure$
                language plpgsql;
            """,
            args=dict(),
            ownConnection=True,
            transaction=False,
        )
        # Fetch the generated drop statements and run each one.
        spdrops = statement.execute(
            statement="""
                select oesetup_generate_drop_all_syntax as drop
                from oesetup_generate_drop_all_syntax()
            """,
            ownConnection=True,
            transaction=False,
        )
        for spdrop in [t['drop'] for t in spdrops]:
            statement.execute(
                statement=spdrop,
                ownConnection=True,
                transaction=False,
            )
        # Drop all views.
        tables = statement.execute(
            statement="""
                select table_name
                from information_schema.views
                where table_schema = %(schemaname)s
            """,
            args=dict(
                schemaname='public',
            ),
            ownConnection=True,
            transaction=False,
        )
        for view in [t['table_name'] for t in tables]:
            statement.execute(
                statement=(
                    """
                        drop view if exists {view} cascade
                    """
                ).format(
                    view=view,
                ),
                ownConnection=True,
                transaction=False,
            )
        # Drop all sequences (relkind 'S').
        seqs = statement.execute(
            statement="""
                select relname as seqname
                from pg_class
                where relkind=%(relkind)s
            """,
            args=dict(
                relkind='S',
            ),
            ownConnection=True,
            transaction=False,
        )
        for seq in [t['seqname'] for t in seqs]:
            statement.execute(
                statement=(
                    """
                        drop sequence if exists {sequence} cascade
                    """
                ).format(
                    sequence=seq,
                ),
                ownConnection=True,
                transaction=False,
            )
        # Drop all tables.
        tables = statement.execute(
            statement="""
                select tablename
                from pg_tables
                where schemaname = %(schemaname)s
            """,
            args=dict(
                schemaname='public',
            ),
            ownConnection=True,
            transaction=False,
        )
        for table in [t['tablename'] for t in tables]:
            statement.execute(
                statement=(
                    """
                        drop table if exists {table} cascade
                    """
                ).format(
                    table=table,
                ),
                ownConnection=True,
                transaction=False,
            )
def _backup_restore_filters_info(self):
return {
'gzip': {
'dump': ['gzip'],
'restore': ['zcat'],
},
'bzip2': {
'dump': ['bzip2'],
'restore': ['bzcat'],
},
'xz': {
'dump': ['xz'],
'restore': ['xzcat'],
},
}
    def _dump_base_args(self):
        """Common pg_dump argument prefix (encoding, connection params)."""
        return [
            self.command.get('pg_dump'),
            '-E', 'UTF8',
            '--disable-dollar-quoting',
            '--disable-triggers',
            '-U', _ind_env(self, DEK.USER),
            '-h', _ind_env(self, DEK.HOST),
            '-p', str(_ind_env(self, DEK.PORT)),
        ]
    def _pg_restore_base_args(self):
        """Common connection arguments shared by pg_restore and psql
        (-w: never prompt for a password -- pgpass supplies it)."""
        return [
            '-w',
            '-h', _ind_env(self, DEK.HOST),
            '-p', str(_ind_env(self, DEK.PORT)),
            '-U', _ind_env(self, DEK.USER),
            '-d', _ind_env(self, DEK.DATABASE),
        ]
    def _backup_restore_dumpers_info(self, backupfile, database):
        # if backupfile is not supplied, we write to stdout
        # Maps dumper name -> argv lists for dump and restore:
        # 'pg_custom' uses pg_dump custom format + pg_restore (supports
        # parallel --jobs restore from a file); 'pg_plain' uses plain SQL
        # restored through psql.
        return {
            'pg_custom': {
                'dump_args': (
                    self._dump_base_args() +
                    [
                        '--format=custom',
                    ] +
                    (
                        ['--file=%s' % backupfile]
                        if backupfile else []
                    ) +
                    [database]
                ),
                'restore_args': (
                    [self.command.get('pg_restore')] +
                    self._pg_restore_base_args() +
                    (
                        # Parallel restore only works from a seekable file.
                        ['--jobs=%s' % _ind_env(self, DEK.RESTORE_JOBS)]
                        if _ind_env(self, DEK.RESTORE_JOBS) and backupfile
                        else []
                    ) +
                    (
                        [backupfile]
                        if backupfile else []
                    )
                ),
            },
            'pg_plain': {
                'dump_args': (
                    self._dump_base_args() +
                    [
                        '--format=plain',
                    ] +
                    (
                        ['--file=%s' % backupfile]
                        if backupfile else []
                    ) +
                    [database]
                ),
                'restore_args': (
                    [self.command.get('psql')] +
                    self._pg_restore_base_args() +
                    (
                        ['--file=%s' % backupfile]
                        if backupfile else []
                    )
                ),
            },
        }
    def backup(
        self,
        dir,
        prefix,
    ):
        """Dump the configured database to a timestamped file in *dir*.

        prefix -- filename prefix for the dump file.

        When a compression filter (DEK.FILTER) is configured, the dumper
        writes to stdout and the filter's output is captured into the
        file; otherwise the dumper writes the file directly.

        Returns the path of the created backup file; raises RuntimeError
        when any pipeline stage exits non-zero.
        """
        database = _ind_env(self, DEK.DATABASE)
        fd, backupFile = tempfile.mkstemp(
            prefix='%s-%s.' % (
                prefix,
                datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            ),
            suffix='.dump',
            dir=dir,
        )
        os.close(fd)
        self.logger.info(
            _("Backing up database {host}:{database} to '{file}'.").format(
                host=_ind_env(self, DEK.HOST),
                database=database,
                file=backupFile,
            )
        )
        filt = _ind_env(self, DEK.FILTER)
        f_infos = {}
        if filt is not None:
            f_infos = self._backup_restore_filters_info()
            if filt not in f_infos:
                raise RuntimeError(_('Unknown db filter {f}').format(f=filt))
        dumper = _ind_env(self, DEK.DUMPER)
        d_infos = self._backup_restore_dumpers_info(
            # With a filter, the dumper must write to stdout (pipe input).
            None if filt else backupFile,
            database
        )
        if dumper not in d_infos:
            raise RuntimeError(_('Unknown db dumper {d}').format(d=dumper))
        pipe = [
            {
                'args': d_infos[dumper]['dump_args'],
            }
        ]
        stdout = None
        if filt is not None:
            pipe.append(
                {
                    'args': f_infos[filt]['dump']
                }
            )
            stdout = open(backupFile, 'w')
        res = None
        try:
            res = self._plugin.executePipeRaw(
                pipe,
                envAppend={
                    # Blank PGPASSWORD: credentials come from the pgpass
                    # file created by createPgPass.
                    'PGPASSWORD': '',
                    'PGPASSFILE': _ind_env(self, DEK.PGPASSFILE),
                },
                stdout=stdout,
            )
        finally:
            if stdout is not None:
                stdout.close()
        self.logger.debug('db backup res %s' % res)
        # Every stage of the pipeline must succeed.
        if set(r['rc'] for r in res['result']) != set((0,)):
            raise RuntimeError(
                _(
                    'Failed to backup database, please check '
                    'the log file for details'
                )
            )
        return backupFile
_IGNORED_ERRORS = (
# TODO: verify and get rid of all the '.*'s
'.*language "plpgsql" already exists',
'.*must be owner of language plpgsql',
# psql
'ERROR: must be owner of extension plpgsql',
# pg_restore
(
'pg_restore: \[archiver \(db\)\] could not execute query: ERROR: '
'must be owner of extension plpgsql'
),
# older versions of dwh used uuid-ossp, which requires
# special privs, is not used anymore, and emits the following
# errors for normal users.
'.*permission denied for language c',
'.*function public.uuid_generate_v1() does not exist',
'.*function public.uuid_generate_v1mc() does not exist',
'.*function public.uuid_generate_v3(uuid, text) does not exist',
'.*function public.uuid_generate_v4() does not exist',
'.*function public.uuid_generate_v5(uuid, text) does not exist',
'.*function public.uuid_nil() does not exist',
'.*function public.uuid_ns_dns() does not exist',
'.*function public.uuid_ns_oid() does not exist',
'.*function public.uuid_ns_url() does not exist',
'.*function public.uuid_ns_x500() does not exist',
# Other stuff, added because if we want to support other
# formats etc we must explicitely filter all existing output
# and not just ERRORs.
'pg_restore: \[archiver \(db\)\] Error while PROCESSING TOC:',
' Command was: COMMENT ON EXTENSION',
(
'pg_restore: \[archiver \(db\)\] Error from TOC entry \d+'
'; 0 0 COMMENT EXTENSION plpgsql'
),
'pg_restore: WARNING:',
'WARNING: ',
'DETAIL: ',
)
_RE_IGNORED_ERRORS = re.compile(
pattern='|'.join(_IGNORED_ERRORS),
)
    def restore(
        self,
        backupFile,
    ):
        """Restore *backupFile* into the configured database.

        Mirrors backup(): when a compression filter is configured, the
        file is fed through the decompressor into the restorer's stdin;
        otherwise the restorer reads the file directly.  Known-harmless
        stderr lines (see _IGNORED_ERRORS) are filtered; anything left is
        logged as an error but does not raise.
        """
        database = _ind_env(self, DEK.DATABASE)
        self.logger.info(
            _("Restoring file '{file}' to database {host}:{database}.").format(
                host=_ind_env(self, DEK.HOST),
                database=database,
                file=backupFile,
            )
        )
        pipe = []
        filt = _ind_env(self, DEK.FILTER)
        f_infos = {}
        if filt is not None:
            f_infos = self._backup_restore_filters_info()
            if filt not in f_infos:
                raise RuntimeError(_('Unknown db filter {f}').format(f=filt))
        stdin = None
        if filt is not None:
            # Decompressor reads the backup file and pipes plain output on.
            pipe.append(
                {
                    'args': f_infos[filt]['restore'],
                }
            )
            stdin = open(backupFile, 'r')
        dumper = _ind_env(self, DEK.DUMPER)
        d_infos = self._backup_restore_dumpers_info(
            None if filt else backupFile,
            database
        )
        if dumper not in d_infos:
            raise RuntimeError(_('Unknown db dumper {d}').format(d=dumper))
        pipe.append(
            {
                'args': d_infos[dumper]['restore_args'],
            }
        )
        try:
            res = self._plugin.executePipeRaw(
                pipe,
                envAppend={
                    # Blank PGPASSWORD: credentials come from the pgpass
                    # file created by createPgPass.
                    'PGPASSWORD': '',
                    'PGPASSFILE': _ind_env(self, DEK.PGPASSFILE),
                },
                stdin=stdin,
                # raiseOnError=False,
            )
        finally:
            if stdin is not None:
                stdin.close()
        # Only the last pipeline stage (the actual restorer) is inspected.
        rc = res['result'][-1]['rc']
        stderr = res['result'][-1]['stderr'].splitlines()
        self.logger.debug('db restore rc %s stderr %s', rc, stderr)

        # if (rc != 0) and stderr:
        # Do something different for psql/pg_restore?
        if stderr:
            errors = [
                l for l in stderr
                if l and not self._RE_IGNORED_ERRORS.match(l)
            ]
            if errors:
                self.logger.error(
                    _(
                        'Errors while restoring {name} database, please check '
                        'the log file for details'
                    ).format(
                        name=database,
                    )
                )
                self.logger.debug(
                    'Errors unfiltered during restore:\n\n%s\n' %
                    '\n'.join(errors)
                )
@staticmethod
def _lower_equal(key, current, expected):
return (
current.strip(' \t"\'').lower() == expected.strip(' \t"\'').lower()
)
@staticmethod
def _error_message(key, current, expected, format_str, name):
return format_str.format(
key=key,
current=current,
expected=expected,
name=name,
)
    def _pg_conf_info(self):
        """Return the table of PostgreSQL settings this setup cares about.

        Each entry describes one setting: its expected value, a callback
        'ok'(key, current, expected) deciding acceptability, whether it is
        verified against an existing server (check_on_use) and/or written
        when provisioning a new one (needed_on_create), and the message
        template passed to _error_message on failure.
        """
        return (
            {
                'key': 'server_encoding',
                'expected': 'UTF8',
                'ok': self._lower_equal,
                'check_on_use': True,
                'needed_on_create': False,
                'error_msg': _(
                    'Encoding of the {name} database is {current}. '
                    '{name} installation is only supported on servers '
                    'with default encoding set to {expected}. Please fix the '
                    'default DB encoding before you continue.'
                )
            },
            {
                'key': 'max_connections',
                'expected': self.environment[
                    oengcommcons.ProvisioningEnv.POSTGRES_MAX_CONN
                ],
                # A larger-than-expected connection limit is acceptable.
                'ok': lambda key, current, expected: (
                    int(current) >= int(expected)
                ),
                'check_on_use': True,
                'needed_on_create': True,
                'error_msg': _(
                    '{name} requires {key} to be at least {expected}. '
                    'Please fix {key} before you continue.'
                )
            },
            {
                'key': 'listen_addresses',
                'expected': self.environment[
                    oengcommcons.ProvisioningEnv.POSTGRES_LISTEN_ADDRESS
                ],
                'ok': self._lower_equal,
                # Only written at provisioning time, never validated later.
                'check_on_use': False,
                'needed_on_create': True,
                'error_msg': None,
            },
            {
                'key': 'lc_messages',
                'expected': self.environment[
                    oengcommcons.ProvisioningEnv.POSTGRES_LC_MESSAGES
                ],
                'ok': self._lower_equal,
                'check_on_use': True,
                'needed_on_create': True,
                'error_msg': _(
                    '{name} requires {key} to be {expected}. '
                    'Please fix {key} before you continue.'
                )
            },
        )
_RE_KEY_VALUE = re.compile(
flags=re.VERBOSE,
pattern=r"""
^
\s*
(?P<key>\w+)
\s*
=
\s*
(?P<value>\w+)
""",
)
def _checkDbConf(self, environment, name):
statement = Statement(
environment=environment,
dbenvkeys=self._dbenvkeys,
)
for item in [
i for i in self._pg_conf_info() if i['check_on_use']
]:
key = item['key']
expected = item['expected']
current = statement.execute(
statement='show {key}'.format(key=key),
ownConnection=True,
transaction=False,
)[0][key]
if not item['ok'](key, current, expected):
raise RuntimeError(
self._error_message(
key=key,
current=current,
expected=expected,
format_str=item['error_msg'],
name=name
)
)
def getUpdatedPGConf(self, content):
needUpdate = True
confs_ok = {}
edit_params = {}
for item in self._pg_conf_info():
key = item['key']
confs_ok[key] = False
if item['needed_on_create']:
edit_params[key] = item['expected']
for l in content:
m = self._RE_KEY_VALUE.match(l)
if m is not None:
for item in [
i for i in self._pg_conf_info()
if i['needed_on_create'] and m.group('key') == i['key']
]:
if item['ok'](
key=key,
current=m.group('value'),
expected=item['expected']
):
confs_ok[item['key']] = True
else:
break
if False not in confs_ok.values():
needUpdate = False
break
if needUpdate:
content = osetuputil.editConfigContent(
content=content,
params=edit_params,
)
return needUpdate, content
def getCredentials(
self,
name,
queryprefix,
defaultdbenvkeys,
show_create_msg=False,
note=None,
credsfile=None,
):
interactive = None in (
_ind_env(self, DEK.HOST),
_ind_env(self, DEK.PORT),
_ind_env(self, DEK.DATABASE),
_ind_env(self, DEK.USER),
_ind_env(self, DEK.PASSWORD),
)
if interactive:
if note is None and credsfile:
note = _(
"\nPlease provide the following credentials for the "
"{name} database.\nThey should be found on the {name} "
"server in '{credsfile}'.\n\n"
).format(
name=name,
credsfile=credsfile,
)
if note:
self.dialog.note(text=note)
if show_create_msg:
self.dialog.note(
text=_(
"\n"
"ATTENTION\n"
"\n"
"Manual action required.\n"
"Please create database for ovirt-engine use. "
"Use the following commands as an example:\n"
"\n"
"create role {user} with login encrypted password "
"'{user}';\n"
"create database {database} owner {user}\n"
" template template0\n"
" encoding 'UTF8' lc_collate 'en_US.UTF-8'\n"
" lc_ctype 'en_US.UTF-8';\n"
"\n"
"Make sure that database can be accessed remotely.\n"
"\n"
).format(
user=defaultdbenvkeys[DEK.USER],
database=defaultdbenvkeys[DEK.DATABASE],
),
)
connectionValid = False
while not connectionValid:
host = _ind_env(self, DEK.HOST)
port = _ind_env(self, DEK.PORT)
secured = _ind_env(self, DEK.SECURED)
securedHostValidation = _ind_env(self, DEK.HOST_VALIDATION)
db = _ind_env(self, DEK.DATABASE)
user = _ind_env(self, DEK.USER)
password = _<PASSWORD>(self, DEK.PASSWORD)
if host is None:
while True:
host = self.dialog.queryString(
name='{qpref}HOST'.format(qpref=queryprefix),
note=_(
'{name} database host [@DEFAULT@]: '
).format(
name=name,
),
prompt=True,
default=defaultdbenvkeys[DEK.HOST],
)
try:
socket.getaddrinfo(host, None)
break # do while missing in python
except socket.error as e:
self.logger.error(
_('Host is invalid: {error}').format(
error=e.strerror
)
)
if port is None:
while True:
try:
port = osetuputil.parsePort(
self.dialog.queryString(
name='{qpref}PORT'.format(qpref=queryprefix),
note=_(
'{name} database port [@DEFAULT@]: '
).format(
name=name,
),
prompt=True,
default=defaultdbenvkeys[DEK.PORT],
)
)
break # do while missing in python
except ValueError:
pass
if secured is None:
secured = dialog.queryBoolean(
dialog=self.dialog,
name='{qpref}SECURED'.format(qpref=queryprefix),
note=_(
'{name} database secured connection (@VALUES@) '
'[@DEFAULT@]: '
).format(
name=name,
),
prompt=True,
default=defaultdbenvkeys[DEK.SECURED],
)
if not secured:
securedHostValidation = False
if securedHostValidation is None:
securedHostValidation = dialog.queryBoolean(
dialog=self.dialog,
name='{qpref}SECURED_HOST_VALIDATION'.format(
qpref=queryprefix
),
note=_(
'{name} database host name validation in secured '
'connection (@VALUES@) [@DEFAULT@]: '
).format(
name=name,
),
prompt=True,
default=True,
) == 'yes'
if db is None:
db = self.dialog.queryString(
name='{qpref}DATABASE'.format(qpref=queryprefix),
note=_(
'{name} database name [@DEFAULT@]: '
).format(
name=name,
),
prompt=True,
default=defaultdbenvkeys[DEK.DATABASE],
)
if user is None:
user = self.dialog.queryString(
name='{qpref}USER'.format(qpref=queryprefix),
note=_(
'{name} database user [@DEFAULT@]: '
).format(
name=name,
),
prompt=True,
default=defaultdbenvkeys[DEK.USER],
)
if password is None:
password = self.dialog.queryString(
name='{qpref}PASSWORD'.format(qpref=queryprefix),
note=_(
'{name} database password: '
).format(
name=name,
),
prompt=True,
hidden=True,
)
dbenv = {
self._dbenvkeys[DEK.HOST]: host,
self._dbenvkeys[DEK.PORT]: port,
self._dbenvkeys[DEK.SECURED]: secured,
self._dbenvkeys[DEK.HOST_VALIDATION]: securedHostValidation,
self._dbenvkeys[DEK.USER]: user,
self._dbenvkeys[DEK.PASSWORD]: password,
self._dbenvkeys[DEK.DATABASE]: db,
}
if interactive:
try:
self.tryDatabaseConnect(dbenv)
self._checkDbConf(environment=dbenv, name=name)
self.environment.update(dbenv)
connectionValid = True
except RuntimeError as e:
self.logger.error(
_('Cannot connect to {name} database: {error}').format(
name=name,
error=e,
)
)
else:
# this is usally reached in provisioning
# or if full ansewr file
self.environment.update(dbenv)
connectionValid = True
try:
self.environment[
self._dbenvkeys[DEK.NEW_DATABASE]
] = self.isNewDatabase()
except:
self.logger.debug('database connection failed', exc_info=True)
if not _ind_env(self, DEK.NEW_DATABASE):
self._checkDbConf(environment=dbenv, name=name)
# vim: expandtab tabstop=4 shiftwidth=4
| 1.773438 | 2 |
home/migrations/0003_site_image.py | txcary/startpage | 0 | 12763558 | <reponame>txcary/startpage
# Generated by Django 2.1.3 on 2018-11-12 14:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the 'image' field to the Site model."""

    # Must be applied after the previous 'home' migration.
    dependencies = [
        ('home', '0002_auto_20181112_1334'),
    ]

    # Existing rows get an empty string as their image value.
    operations = [
        migrations.AddField(
            model_name='site',
            name='image',
            field=models.CharField(default='', max_length=1024),
        ),
    ]
| 1.625 | 2 |
tests/downloads/conftest.py | allenmichael/pyTenable | 0 | 12763559 | <reponame>allenmichael/pyTenable<gh_stars>0
import pytest, os, uuid, warnings
from tenable.downloads import Downloads
from tenable.errors import *
@pytest.fixture(scope='module')
def vcr_config():
    """VCR settings: scrub the real API token from recorded cassettes."""
    scrubbed_headers = [
        ('Authorization', 'Bearer 000'),
    ]
    return {'filter_headers': scrubbed_headers}
@pytest.fixture(autouse=True, scope='module')
def dl(request, vcr):
    # The Downloads client raises DeprecationWarnings that are irrelevant
    # to these tests; silence them for the module.
    warnings.filterwarnings('ignore', category=DeprecationWarning)
    # NOTE(review): 'request' and 'vcr' appear unused; presumably they are
    # declared so the vcr fixture is active before the client is built --
    # confirm before removing.
    return Downloads(os.getenv('DL_TOKEN'),
                     vendor='pytest',
                     product='pytenable-automated-testing')
src/hive_udf/hive_udf_example.py | zpz/hive-udf | 0 | 12763560 | <reponame>zpz/hive-udf
'''
This is an example UDF for Hive, to be processed by `make_udf` and used in tests.
UDFs should follow this example. The main points include:
1. The code in this module is written against the Python version that is
available on the Hive cluster.
2. Because this module runs in the environment on the server (worker nodes
to be precise), only packages in the server's environment can be used.
To be safe, it's a good idea to make this module *self sufficient* and use
Python's standard libraries only. It should not use disk files.
3. This example defines a main function that processes a single line,
and the entry block passes individual lines on stdin to this function.
This is a reasonable pattern to use.
However, if you have an existing UDF script that works but does not
follow this pattern, it is not necessary to modify the script.
It is expected that `make_udf` 'just works'.
4. `from __future__ import print_function` is recommended.
If this module is written against Python 2, but this module needs
to be imported into Python 3 code, then this import is required.
What this UDF does:
Take two columns: `id`, `info_json`.
Parse `info_json` to get `make` and `price`.
Print out these two columns.
If `price` does not exist in `make`, that column is NULL.
'''
from __future__ import print_function
import json
import sys
# Column separator and Hive's NULL marker for streaming rows.
SEP = '\t'
NULL = '\\N'


def main():
    """Stream rows from stdin, emitting 'make' and optional 'price'."""
    for raw_line in sys.stdin:
        columns = raw_line.strip().split(SEP)
        _, info_json = columns
        record = json.loads(info_json)
        price = record.get('price', NULL)
        print(record['make'] + SEP + str(price))


if __name__ == '__main__':
    main()
yumsync/yumrepo.py | hoonetorg/yumsync | 21 | 12763561 | # standard imports
from contextlib import contextmanager
from urllib2 import urlopen
from urlparse import urlparse
import copy
import os
import shutil
import sys
import tempfile
import time
# third-party imports
import createrepo
import yum
# local imports
from yumsync.yumbase import YumBase
import yumsync.util as util
class MetadataBuildError(Exception):
    """Raised when createrepo metadata generation fails."""
    # The previous explicit __init__ merely delegated *args/**kwargs to
    # Exception.__init__; inheriting the default is equivalent and idiomatic.
    pass
class PackageDownloadError(Exception):
    """Raised when downloading or linking repository packages fails."""
    # The previous explicit __init__ merely delegated *args/**kwargs to
    # Exception.__init__; inheriting the default is equivalent and idiomatic.
    pass
class YumRepo(object):
    """A single YUM repository to mirror, version and publish.

    Wraps a yum repo object plus the on-disk layout (packages/, optional
    dated version directories, 'latest'/'stable' symlinks) and drives the
    download -> prune -> metadata -> link pipeline via sync().
    """

    def __init__(self, repoid, base_dir, opts=None):
        """Validate *opts*, build the yum repo object and compute paths."""
        # make sure good defaults
        if opts is None:
            opts = {}
        opts = self._set_default_opts(opts)
        self._validate_opts(opts)
        self._validate_type(base_dir, 'base_dir', str)
        # set actual repo object
        self.__repo_obj = self._get_repo_obj(repoid, opts['local_dir'], opts['baseurl'], opts['mirrorlist'])
        self.__repo_obj.includepkgs = opts['includepkgs']
        self.__repo_obj.exclude = opts['excludepkgs']
        self.id = repoid
        self.checksum = opts['checksum']
        # Combined (unversioned) metadata only makes sense when versioning.
        self.combine = opts['combined_metadata'] if opts['version'] else None
        self.delete = opts['delete']
        self.gpgkey = opts['gpgkey']
        self.link_type = opts['link_type']
        self.local_dir = opts['local_dir']
        self.stable = opts['stable']
        # 'version' is a strftime pattern; rendered once at construction.
        self.version = time.strftime(opts['version']) if opts['version'] else None
        self.srcpkgs = opts['srcpkgs']
        self.newestonly = opts['newestonly']
        # root directory for repo and packages
        self.dir = os.path.join(base_dir, self._friendly(self.id))
        self.package_dir = os.path.join(self.dir, 'packages')
        # version directory for repo and packages
        self.version_dir = os.path.join(self.dir, self.version) if self.version else None
        self.version_package_dir = os.path.join(self.version_dir, 'packages') if self.version_dir else None
        # log directory for repo
        self.log_dir = self.version_dir if self.version_dir else self.dir
        # public directory for repo
        self.public_dir = os.path.join(base_dir, 'public', self._sanitize(self.id))
        # set default callbacks
        self.__repo_callback_obj = None
        self.__yum_callback_obj = None
        # set repo placeholders
        self._packages = []
        self._comps = None

    @staticmethod
    def _validate_type(obj, obj_name, *obj_types):
        """Raise TypeError unless *obj* is one of *obj_types* (None allowed
        by passing None as a type)."""
        valid_types = list(obj_types)
        if obj_name is None:
            obj_name = 'object'
        if len(valid_types) < 1:
            raise ValueError('no valid types were passed in for {}'.format(obj_name))
        if None in obj_types:
            # Replace the None marker with NoneType for isinstance().
            valid_types.remove(None)
            valid_types.sort()
            valid_types.append(type(None))
        if not isinstance(obj, tuple(valid_types)):
            valid_str = ', '.join([t.__name__ for t in valid_types])
            raise TypeError('{} is {}; must be {}'.format(obj_name, type(obj).__name__, valid_str))

    @staticmethod
    def _validate_url(url):
        """Raise ValueError unless *url* uses http(s) or file scheme."""
        if not (url.startswith('http://') or url.startswith('https://') or url.startswith('file://')):
            raise ValueError('Unsupported URL format "{}"'.format(url))

    @staticmethod
    def _set_default_opts(opts=None):
        """Fill missing option keys with their defaults and normalize
        link_type ('symlink' unless explicitly 'hardlink')."""
        if not isinstance(opts, dict):
            opts = {}
        if 'baseurl' not in opts:
            opts['baseurl'] = None
        if 'checksum' not in opts:
            opts['checksum'] = None
        if 'combined_metadata' not in opts:
            opts['combined_metadata'] = None
        if 'delete' not in opts:
            opts['delete'] = None
        if 'excludepkgs' not in opts:
            opts['excludepkgs'] = None
        if 'gpgkey' not in opts:
            opts['gpgkey'] = None
        if 'includepkgs' not in opts:
            opts['includepkgs'] = None
        if 'link_type' in opts and isinstance(opts['link_type'], str):
            opts['link_type'] = opts['link_type'].lower()
        if 'link_type' not in opts or (opts['link_type'] != 'symlink' and opts['link_type'] != 'hardlink'):
            opts['link_type'] = 'symlink'
        if 'local_dir' not in opts:
            opts['local_dir'] = None
        if 'mirrorlist' not in opts:
            opts['mirrorlist'] = None
        if 'stable' not in opts:
            opts['stable'] = None
        if not isinstance(opts['stable'], str) and opts['stable'] is not None:
            # e.g. a YAML date; the symlink target must be a string.
            opts['stable'] = str(opts['stable'])
        if 'version' not in opts:
            opts['version'] = '%Y/%m/%d'
        if 'srcpkgs' not in opts:
            opts['srcpkgs'] = None
        if 'newestonly' not in opts:
            opts['newestonly'] = None
        return opts

    @classmethod
    def _validate_opts(cls, opts):
        """Type- and URL-check every option produced by _set_default_opts."""
        cls._validate_type(opts['baseurl'], 'baseurl', str, list, None)
        if isinstance(opts['baseurl'], list):
            for b in opts['baseurl']:
                cls._validate_type(b, 'baseurl (in list)', str)
                cls._validate_url(b)
        elif isinstance(opts['baseurl'], str):
            cls._validate_url(opts['baseurl'])
        cls._validate_type(opts['checksum'], 'checksum', str, None)
        cls._validate_type(opts['combined_metadata'], 'combined_metadata', bool, None)
        cls._validate_type(opts['delete'], 'delete', bool, None)
        cls._validate_type(opts['excludepkgs'], 'excludepkgs', str, list, None)
        if isinstance(opts['excludepkgs'], list):
            for e in opts['excludepkgs']:
                cls._validate_type(e, 'excludepkgs (in list)', str)
        cls._validate_type(opts['gpgkey'], 'gpgkey', str, list, None)
        if isinstance(opts['gpgkey'], list):
            for g in opts['gpgkey']:
                cls._validate_type(g, 'gpgkey (in list)', str)
                cls._validate_url(g)
        elif opts['gpgkey'] is str:
            # NOTE(review): 'is str' looks like it should be
            # isinstance(opts['gpgkey'], str) -- as written this branch can
            # never run; confirm intent.
            cls._validate_url(opts['gpgkey'])
        cls._validate_type(opts['includepkgs'], 'includepkgs', str, list, None)
        if isinstance(opts['includepkgs'], list):
            for i in opts['includepkgs']:
                cls._validate_type(i, 'includepkgs (in list)', str)
        cls._validate_type(opts['link_type'], 'link_type', str)
        cls._validate_type(opts['local_dir'], 'local_dir', str, None)
        cls._validate_type(opts['mirrorlist'], 'mirrorlist', str, None)
        if opts['mirrorlist'] is not None:
            cls._validate_url(opts['mirrorlist'])
        cls._validate_type(opts['stable'], 'stable', str, None)
        cls._validate_type(opts['version'], 'version', str, None)
        cls._validate_type(opts['srcpkgs'], 'srcpkgs', bool, None)
        cls._validate_type(opts['newestonly'], 'newestonly', bool, None)

    @staticmethod
    def _sanitize(text):
        # Strip surrounding whitespace and slashes for use in paths.
        return text.strip().strip('/')

    @classmethod
    def _friendly(cls, text):
        # Flatten a repo id like 'a/b' into a single directory name 'a_b'.
        return cls._sanitize(text).replace('/', '_')

    @staticmethod
    def _get_repo_obj(repoid, localdir=None, baseurl=None, mirrorlist=None):
        """Create and return a yum repo object from one of baseurl,
        mirrorlist or a local directory (checked in that order)."""
        yb = YumBase()
        if baseurl is not None:
            if isinstance(baseurl, list):
                repo = yb.add_enable_repo(repoid, baseurls=baseurl)
            else:
                repo = yb.add_enable_repo(repoid, baseurls=[baseurl])
        elif mirrorlist is not None:
            repo = yb.add_enable_repo(repoid, mirrorlist=mirrorlist)
        elif localdir:
            repo = yb.add_enable_repo(repoid)
        else:
            raise ValueError('One or more baseurls or mirrorlist required')
        return repo

    def set_repo_callback(self, callback):
        """Register the object that receives _callback() progress events."""
        self.__repo_callback_obj = callback

    def set_yum_callback(self, callback):
        """Register the yum download-progress callback object."""
        self.__yum_callback_obj = callback

    def _set_path(self, path):
        """Return a shallow copy of the repo object with pkgdir set to
        *path* (errors from yum while setting it are ignored)."""
        repo = copy.copy(self.__repo_obj)
        try:
            repo.pkgdir = path
        except yum.Errors.RepoError:
            pass
        return repo

    def setup_directories(self):
        """Create/repair the packages directory layout for this repo,
        converting between symlink and real-directory forms as needed."""
        if self.local_dir and self.link_type == 'symlink':
            # packages/ becomes a symlink straight into the local mirror.
            if not os.path.islink(self.package_dir) and os.path.isdir(self.package_dir):
                shutil.rmtree(self.package_dir)
            util.symlink(self.package_dir, self.local_dir)
        else:
            if os.path.islink(self.package_dir):
                os.unlink(self.package_dir)
            util.make_dir(self.package_dir)
        if self.version_dir:
            # Reset whatever currently occupies <version>/packages.
            if os.path.islink(self.version_package_dir) or os.path.isfile(self.version_package_dir):
                os.unlink(self.version_package_dir)
            elif os.path.isdir(self.version_package_dir):
                shutil.rmtree(self.version_package_dir)
            if self.link_type == 'symlink':
                util.symlink(self.version_package_dir, os.path.relpath(self.package_dir, self.version_dir))
            else: # hardlink
                util.make_dir(self.version_package_dir)

    def download_gpgkey(self):
        """Download any configured GPG key(s) into the repo directory.

        Returns the list of local key paths, or None when no key is
        configured. Individual failures are reported via _callback and do
        not abort the remaining keys.
        """
        if self.gpgkey:
            gpgkey_paths = []
            if isinstance(self.gpgkey, list):
                gpgkey_iter = self.gpgkey
            else:
                gpgkey_iter = [self.gpgkey]
            for gpgkey in gpgkey_iter:
                try:
                    keyname = os.path.basename(urlparse(gpgkey).path)
                    key_path = os.path.join(self.dir, keyname)
                    if not os.path.exists(key_path):
                        key_data = urlopen(gpgkey)
                        with open(key_path, 'w') as f:
                            f.write(key_data.read())
                        key_data.close()
                        self._callback('gpgkey_download', os.path.basename(key_path))
                    else:
                        self._callback('gpgkey_exists', os.path.basename(key_path))
                    gpgkey_paths.append(key_path)
                except Exception as e:
                    self._callback('gpgkey_error', str(e))
            return gpgkey_paths
        return None

    def prepare_packages(self):
        """Download, prune and (if versioning with hardlinks) version
        this repo's packages."""
        self.download_packages()
        self.prune_packages()
        self.version_packages()

    def download_packages(self):
        # Local mirrors are linked in; remote repos are fetched via yum.
        if self.local_dir:
            self._download_local_packages()
        else:
            self._download_remote_packages()

    @classmethod
    def _validate_packages(cls, directory, packages):
        """Filter *packages* (a name or list of names under *directory*)
        down to those whose RPM header can be read; returns the same shape
        as the input (single result, list, or None)."""
        ts = yum.rpmUtils.transaction.initReadOnlyTransaction()
        if isinstance(packages, str):
            return cls._validate_package(ts, directory, packages)
        elif isinstance(packages, list):
            valid = []
            for pkg in packages:
                if cls._validate_package(ts, directory, pkg):
                    valid.append(pkg)
            return valid
        else:
            return None

    @staticmethod
    def _validate_package(ts, directory, package):
        """Return the RPM header for *package*, or None if unreadable."""
        h = None
        try:
            pkg_path = os.path.join(directory, package)
            h = yum.rpmUtils.miscutils.hdrFromPackage(ts, pkg_path)
        except yum.rpmUtils.RpmUtilsError:
            pass
        return h

    def _download_local_packages(self):
        """Populate package_dir from local_dir (hardlinking when
        requested), reporting progress through _callback."""
        try:
            packages = self._validate_packages(self.local_dir, sorted(os.listdir(self.local_dir)))
            self._callback('repo_init', len(packages), True)
            for _file in packages:
                if self.link_type == 'hardlink':
                    status = util.hardlink(os.path.join(self.local_dir, _file), os.path.join(self.package_dir, _file))
                    if status:
                        size = os.path.getsize(os.path.join(self.local_dir, _file))
                        self._callback('link_local_pkg', _file, size)
                    else:
                        self._callback('pkg_exists', _file)
                else:
                    # symlink mode: package_dir already points at local_dir.
                    self._callback('pkg_exists', _file)
            self._packages = packages
            self._callback('repo_complete')
        except (KeyboardInterrupt, SystemExit):
            pass
        except Exception as e:
            self._callback('repo_error', str(e))
            raise PackageDownloadError(str(e))

    def _download_remote_packages(self):
        """Fetch packages from the remote repo into package_dir via yum."""
        @contextmanager
        def suppress():
            """ Suppress stdout within a context.

            This is necessary in this use case because, unfortunately, the YUM
            library will do direct printing to stdout in many error conditions.
            Since we are maintaining a real-time, in-place updating presentation
            of progress, we must suppress this, as we receive exceptions for our
            reporting purposes anyways.

            NOTE(review): if the wrapped code raises, stdout is never
            restored (no try/finally around the yield) and the devnull
            handle leaks -- worth fixing.
            """
            stdout = sys.stdout
            sys.stdout = open(os.devnull, 'w')
            yield
            sys.stdout = stdout
        try:
            yb = YumBase()
            if self.srcpkgs:
                # Allow source RPMs to be considered by the resolver.
                if not 'src' in yb.arch.archlist:
                    yb.arch.archlist.append('src')
            repo = self._set_path(self.package_dir)
            if self.__yum_callback_obj:
                repo.setCallback(self.__yum_callback_obj)
            yb.repos.add(repo)
            yb.repos.enableRepo(repo.id)
            with suppress():
                if self.newestonly:
                    packages = yb.pkgSack.returnNewestByNameArch()
                else:
                    packages = yb.pkgSack.returnPackages()
            # Inform about number of packages total in the repo.
            self._callback('repo_init', len(packages))
            # Check if the packages are already downloaded. This is probably a bit
            # expensive, but the alternative is simply not knowing, which is
            # horrible for progress indication.
            for po in packages:
                local = po.localPkg()
                self._packages.append(os.path.basename(local))
                if os.path.exists(local):
                    if yb.verifyPkg(local, po, False):
                        self._callback('pkg_exists', os.path.basename(local))
            with suppress():
                yb.downloadPkgs(packages)
            self._callback('repo_complete')
        except (KeyboardInterrupt, SystemExit):
            pass
        except Exception as e:
            self._callback('repo_error', str(e))
            raise PackageDownloadError(str(e))

    def prune_packages(self):
        """Either delete packages no longer in the repo (delete=True) or
        re-adopt valid stray files found in package_dir."""
        # exit if we don't have packages
        if not self._packages:
            return
        if self.delete:
            # Deleting shared files would corrupt older symlinked versions.
            if not self.version or self.link_type != 'symlink':
                for _file in os.listdir(self.package_dir):
                    if _file not in self._packages:
                        os.unlink(os.path.join(self.package_dir, _file))
                        self._callback('delete_pkg', _file)
        else:
            packages_to_validate = sorted(list(set(os.listdir(self.package_dir)) - set(self._packages)))
            self._packages.extend(self._validate_packages(self.package_dir, packages_to_validate))

    def version_packages(self):
        """Hardlink the current package set into the version directory
        (symlink mode needs nothing; the whole dir already points back)."""
        # exit if we don't have packages
        if not self._packages:
            return
        if self.version and self.link_type == 'hardlink':
            for pkg in self._packages:
                source_file = os.path.join(self.package_dir, pkg)
                target_file = os.path.join(self.version_package_dir, pkg)
                util.hardlink(source_file, target_file)

    def get_group_data(self):
        """Fetch comps (group) XML from the remote repo into self._comps;
        local repos carry no group data."""
        if self.local_dir:
            self._comps = None
        else:
            try:
                yb = YumBase()
                yb.repos.add(self.__repo_obj)
                self._comps = yb._getGroups().xml()
            except yum.Errors.GroupsError:
                pass
        if self._comps:
            self._callback('repo_group_data', 'available')
        else:
            self._callback('repo_group_data', 'unavailable')

    def build_metadata(self):
        """Run createrepo over the current package list in a temporary
        staging directory and return that directory's path (caller must
        copy the repodata out and remove it)."""
        staging = tempfile.mkdtemp(prefix='yumsync-', suffix='-metadata')
        if self._packages is None:
            packages = []
        else:
            # Paths are relative to the repo dir, i.e. 'packages/<rpm>'.
            packages = [os.path.join(os.path.basename(self.package_dir), pkg) for pkg in self._packages]
        if self.checksum == 'sha' or self.checksum == 'sha1':
            sumtype = 'sha'
        else:
            sumtype = 'sha256'
        conf = createrepo.MetaDataConfig()
        conf.directory = os.path.dirname(self.package_dir)
        conf.outputdir = staging
        conf.sumtype = sumtype
        conf.pkglist = packages
        conf.quiet = True
        if self._comps:
            # createrepo needs the comps XML as a file on disk.
            groupdir = tempfile.mkdtemp(prefix='yumsync-', suffix='-groupdata')
            conf.groupfile = os.path.join(groupdir, 'groups.xml')
            with open(conf.groupfile, 'w') as f:
                f.write(self._comps)
        generator = createrepo.SplitMetaDataGenerator(conf)
        generator.doPkgMetadata()
        generator.doRepoMetadata()
        generator.doFinalMove()
        if self._comps and os.path.exists(groupdir):
            shutil.rmtree(groupdir)
        return staging

    def prepare_metadata(self):
        """Build repodata and install it into the repo root and/or the
        version directory; raises MetadataBuildError on failure."""
        self.get_group_data()
        self._callback('repo_metadata', 'building')
        try:
            staging = self.build_metadata()
        except Exception as e:
            self._callback('repo_error', str(e))
            raise MetadataBuildError(str(e))
        repodata_dir = os.path.join(self.dir, 'repodata')
        if os.path.exists(repodata_dir):
            shutil.rmtree(repodata_dir)
        # Unversioned repos, and versioned ones with combined metadata,
        # also get repodata at the repo root.
        if not self.version or self.combine:
            shutil.copytree(os.path.join(staging, 'repodata'), repodata_dir)
        if self.version:
            repodata_dir = os.path.join(self.version_dir, 'repodata')
            if os.path.exists(repodata_dir):
                shutil.rmtree(repodata_dir)
            shutil.copytree(os.path.join(staging, 'repodata'), repodata_dir)
        # cleanup temporary metadata
        shutil.rmtree(staging)
        self._callback('repo_metadata', 'complete')

    def create_links(self):
        """Maintain the 'latest' and 'stable' symlinks for versioned
        repos; remove them when versioning is disabled."""
        if self.version:
            util.symlink(os.path.join(self.dir, 'latest'), self.version)
            self._callback('repo_link_set', 'latest', self.version)
            if self.stable:
                util.symlink(os.path.join(self.dir, 'stable'), self.stable)
                self._callback('repo_link_set', 'stable', self.stable)
            elif os.path.lexists(os.path.join(self.dir, 'stable')):
                os.unlink(os.path.join(self.dir, 'stable'))
        else:
            if os.path.lexists(os.path.join(self.dir, 'latest')):
                os.unlink(os.path.join(self.dir, 'latest'))
            if os.path.lexists(os.path.join(self.dir, 'stable')):
                os.unlink(os.path.join(self.dir, 'stable'))

    def sync(self):
        """Run the full mirror pipeline for this repo.

        NOTE(review): returns False on metadata/download errors but falls
        off the end (None) on success -- callers should treat any falsy
        value other than False as success, or this should return True.
        """
        try:
            self.setup_directories()
            self.download_gpgkey()
            self.prepare_packages()
            self.prepare_metadata()
            self.create_links()
        except MetadataBuildError:
            return False
        except PackageDownloadError:
            return False

    def __str__(self):
        """Human-readable summary: '<id>: key(value), ...' for every
        explicitly-configured option."""
        raw_info = {}
        if self.checksum:
            raw_info['checksum'] = self.checksum
        if self.combine is not None:
            raw_info['combine'] = self.combine
        if self.delete is not None:
            raw_info['delete'] = self.delete
        if self.gpgkey:
            raw_info['gpgkey'] = self.gpgkey
        if self.link_type:
            raw_info['link_type'] = self.link_type
        if self.local_dir:
            raw_info['local_dir'] = self.local_dir
        if self.stable:
            raw_info['stable'] = self.stable
        if self.version:
            raw_info['version'] = self.version
        if self.srcpkgs is not None:
            raw_info['srcpkgs'] = self.srcpkgs
        if self.newestonly is not None:
            raw_info['newestonly'] = self.newestonly
        friendly_info = ['{}({})'.format(k, raw_info[k]) for k in sorted(raw_info)]
        return '{}: {}'.format(self.id, ', '.join(friendly_info))

    def _callback(self, event, *args):
        # Dispatch a progress event to the registered callback object, if
        # it defines a handler for this event name.
        if self.__repo_callback_obj and hasattr(self.__repo_callback_obj, event):
            method = getattr(self.__repo_callback_obj, event)
            method(self.id, *args)
| 2.078125 | 2 |
product/product_labels.py | saiihamza/open_data_parsing | 0 | 12763562 | <reponame>saiihamza/open_data_parsing
class ProductLabels(object):
    """Container for a product's label fields (raw, tags, French)."""

    def __init__(self, labels, labels_tags, labels_fr):
        self.Labels = labels
        self.LabelsTags = labels_tags
        self.LabelsFr = labels_fr

    def __str__(self):
        # The string form is simply the raw labels field.
        return self.Labels
| 2.828125 | 3 |
consort/tools/SimpleDynamicExpression.py | josiah-wolf-oberholtzer/consort | 9 | 12763563 | <filename>consort/tools/SimpleDynamicExpression.py<gh_stars>1-10
import abjad
from abjad import attach
from abjad import inspect
from abjad import iterate
from abjad import override
from abjad.tools import abctools
from abjad.tools import indicatortools
from abjad.tools import instrumenttools
from abjad.tools import selectiontools
from abjad.tools import spannertools
class SimpleDynamicExpression(abctools.AbjadValueObject):
    r'''A dynamic expression.

    ..  container:: example

        ::

            >>> dynamic_expression = consort.SimpleDynamicExpression(
            ...     hairpin_start_token='sfp',
            ...     hairpin_stop_token='niente',
            ...     )

        ::

            >>> staff = abjad.Staff("c'8 d'8 e'8 f'8 g'8 a'8 b'8 c''8")
            >>> dynamic_expression(staff[2:-2])
            >>> print(format(staff))
            \new Staff {
                c'8
                d'8
                \override Hairpin.circled-tip = ##t
                e'8 \> \sfp
                f'8
                g'8
                \revert Hairpin.circled-tip
                a'8 \!
                b'8
                c''8
            }

    ..  container:: example

        ::

            >>> dynamic_expression = consort.SimpleDynamicExpression(
            ...     'f', 'p',
            ...     )
            >>> staff = abjad.Staff("c'8 d'8 e'8 f'8 g'8 a'8 b'8 c''8")
            >>> dynamic_expression(staff[2:-2])
            >>> print(format(staff))
            \new Staff {
                c'8
                d'8
                e'8 \> \f
                f'8
                g'8
                a'8 \p
                b'8
                c''8
            }

    '''

    ### CLASS VARIABLES ###

    __slots__ = (
        '_hairpin_start_token',
        '_hairpin_stop_token',
        '_minimum_duration',
        )

    ### INITIALIZER ###

    def __init__(
        self,
        hairpin_start_token='p',
        hairpin_stop_token=None,
        minimum_duration=abjad.Duration(1, 4),
        ):
        """Validate the dynamic tokens and store them.

        Both tokens must be known dynamic names ('niente' included); at
        most one of them may be 'niente', and a 'niente' start requires an
        explicit stop. Bug fix: the redacted token literals have been
        restored to 'niente'.
        """
        known_dynamics = indicatortools.Dynamic._dynamic_names
        assert hairpin_start_token in known_dynamics, \
            (known_dynamics, hairpin_start_token)
        if hairpin_stop_token is not None:
            assert hairpin_stop_token in known_dynamics
        assert hairpin_start_token != 'niente' or hairpin_stop_token != 'niente'
        if hairpin_start_token == 'niente':
            assert hairpin_stop_token is not None
        self._hairpin_start_token = hairpin_start_token
        self._hairpin_stop_token = hairpin_stop_token
        if minimum_duration is not None:
            minimum_duration = abjad.Duration(minimum_duration)
        self._minimum_duration = minimum_duration

    ### SPECIAL METHODS ###

    def __call__(self, music, name=None):
        """Attach a hairpin (or a lone dynamic) to *music*.

        Short selections -- fewer than two items, shorter than
        minimum_duration, or very few logical ties on piano/percussion --
        receive only the start (or stop) dynamic; otherwise a crescendo or
        decrescendo hairpin is attached, with a circled tip when one end
        is 'niente'. Preceding after-grace notes are folded into the span.
        """
        if not isinstance(music, selectiontools.Selection):
            music = selectiontools.Selection(music)
        is_short_group = False
        if len(music) < 2:
            is_short_group = True
        elif self.minimum_duration is not None:
            if music.get_duration() < self.minimum_duration:
                is_short_group = True
        instrument = abjad.inspect(music[0]).get_effective(
            instrumenttools.Instrument,
            )
        logical_ties = tuple(iterate(music).by_logical_tie(pitched=True))
        if len(logical_ties) < 3:
            # Sustain-poor instruments get no hairpin over few attacks.
            if instrument == instrumenttools.Piano() or \
                instrument == instrumenttools.Percussion():
                is_short_group = True
        grace_notes = None
        previous_leaf = abjad.inspect(music[0]).get_leaf(-1)
        if previous_leaf is not None:
            after_grace = abjad.inspect(previous_leaf).get_after_grace_container()
            if after_grace is not None:
                grace_notes = list(iterate(after_grace).by_leaf())
                music = selectiontools.ContiguousSelect(
                    tuple(grace_notes) + tuple(music),
                    )
        start_token = self.hairpin_start_token
        stop_token = self.hairpin_stop_token
        if is_short_group or stop_token is None:
            # Attach a single dynamic instead of a hairpin.
            if start_token == 'niente':
                start_token = stop_token
            if start_token.startswith('fp'):
                # Drop the attack component, e.g. 'fp' -> 'p'.
                start_token = start_token[1:]
            command = indicatortools.LilyPondCommand(start_token, 'right')
            attach(command, music[0], name=name)
            return
        # Bug fix: 'NegativeInfinity' was an unresolved name in this
        # module; float('-inf') compares below every dynamic ordinal.
        start_ordinal = float('-inf')
        if start_token != 'niente':
            start_ordinal = indicatortools.Dynamic.dynamic_name_to_dynamic_ordinal(
                start_token)
        stop_ordinal = float('-inf')
        if stop_token != 'niente':
            stop_ordinal = indicatortools.Dynamic.dynamic_name_to_dynamic_ordinal(stop_token)
        items = []
        is_circled = False
        if start_ordinal < stop_ordinal:
            # Crescendo; a 'niente' start becomes a circled open tip.
            if start_token != 'niente':
                items.append(start_token)
            else:
                is_circled = True
            items.append('<')
            items.append(stop_token)
        elif stop_ordinal < start_ordinal:
            # Decrescendo; a 'niente' stop becomes a circled closing tip.
            items.append(start_token)
            items.append('>')
            if stop_token != 'niente':
                items.append(stop_token)
            else:
                #items.append('!')
                is_circled = True
        hairpin_descriptor = ' '.join(items)
        hairpin = spannertools.Hairpin(
            descriptor=hairpin_descriptor,
            include_rests=False,
            )
        if is_circled:
            override(hairpin).hairpin.circled_tip = True
        attach(hairpin, music, name=name)

    ### PUBLIC PROPERTIES ###

    @property
    def hairpin_start_token(self):
        """The dynamic name at the start of the hairpin."""
        return self._hairpin_start_token

    @property
    def hairpin_stop_token(self):
        """The dynamic name at the end of the hairpin, or None."""
        return self._hairpin_stop_token

    @property
    def minimum_duration(self):
        """Selections shorter than this get a lone dynamic, no hairpin."""
        return self._minimum_duration
| 2.265625 | 2 |
karabo_bridge/tests/test_server.py | European-XFEL/karabo-bridge-py | 6 | 12763564 | from karabo_bridge import Client
from .utils import compare_nested_dict
def test_req_rep(server, data):
    """Feed three messages and verify each arrives intact at a client."""
    message_count = 3
    for _ in range(message_count):
        server.feed(data)

    with Client(server.endpoint) as client:
        for _ in range(message_count):
            received, metadata = client.next()
            compare_nested_dict(data, received)
| 2.5 | 2 |
django_flex_user/urls.py | ebenh/django-flex-user | 1 | 12763565 | <reponame>ebenh/django-flex-user
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from django_flex_user import views
# URL routing for the django_flex_user app: users and their OAuth
# providers, session management, and one-time-password token delivery.
urlpatterns = [
    path('csrf-tokens/', views.get_csrf_token),
    path('users/', views.FlexUsers.as_view()),
    path('users/user/', views.FlexUser.as_view()),
    path('users/user/oauth-providers/', views.OAuthProviders.as_view()),
    path('sessions/', views.Sessions.as_view()),
    path('otp-tokens/', views.OTPTokens.as_view()),
    path('otp-tokens/email/<str:pk>', views.EmailToken.as_view(), name='email-token'),
    path('otp-tokens/phone/<str:pk>', views.PhoneToken.as_view(), name='phone-token'),
]

# djangorestframework: allow explicit format suffixes (e.g. '.json').
urlpatterns = format_suffix_patterns(urlpatterns)
| 2.078125 | 2 |
baikeSpider/spiders/wiki_en_spider.py | pluto-junzeng/baiduSpider | 13 | 12763566 | <gh_stars>10-100
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author:lichunhui
@Time: 2018/7/26 19:48
@Description:
"""
import re
import copy
from itertools import chain
from urllib.parse import unquote
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.spiders import Rule
from baikeSpider.cache.html_cache import CacheTool
from baikeSpider.items import WikiENSpiderItem
from .redis_spider import RedisCrawlSpider
from ..config import wiki_en_task_queue, wiki_en_spider_name
class WikiENSpider(RedisCrawlSpider):
    """Crawl English Wikipedia article pages and extract structured fields.

    Follows ``/wiki/`` links via the CrawlSpider rule and yields one
    WikiENSpiderItem per page with title, hatnote, table of contents,
    body text, references, category tags, images and outgoing item links.
    """

    task_queue = wiki_en_task_queue
    base_url = "https://en.wikipedia.org"
    name = wiki_en_spider_name
    allowed_domains = ['en.wikipedia.org']
    rules = (
        Rule(LinkExtractor(allow=('https://en.wikipedia.org/wiki/',)), callback='parse', follow=True),
    )
    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            # Disable the stock user-agent middleware; the project provides
            # its own UA rotation, retry and proxy middlewares.
            'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
            'baikeSpider.middlewares.MyUserAgentMiddleware': 400,
            'baikeSpider.middlewares.MyRetryMiddleware': 501,
            'baikeSpider.middlewares.MyProxyMiddleware': 100,
            'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,
        }
    }

    # Matches runs of two or more blank lines (any newline convention);
    # previously this pattern was re-compiled inline at seven call sites.
    _BLANK_LINES_RE = re.compile('(\r\n){2,}|\n{2,}|\r{2,}')

    @staticmethod
    def _to_utf8(text):
        """Round-trip *text* through UTF-8, silently dropping unencodable chars."""
        return text.encode('utf-8', errors='ignore').decode('utf-8')

    @classmethod
    def _clean(cls, text):
        """Normalize *text* to UTF-8 and squash repeated blank lines."""
        return cls._BLANK_LINES_RE.sub('\n', cls._to_utf8(text))

    @classmethod
    def _first_clean(cls, extracted):
        """Clean the first string of an extract() result, or '' if empty."""
        return cls._clean(extracted[0]) if extracted else ''

    def parse(self, response):
        """Extract article fields from *response* and yield one item.

        Also collects relative in-wiki links ('keywords_url') that feed
        the iterative crawl.
        """
        items = WikiENSpiderItem()
        selector = Selector(response)
        items['url'] = unquote(response.url)
        items['html'] = response.text

        title = selector.xpath("/html/head/title/text()").extract()
        items['title'] = self._to_utf8(title[0].strip()) if title else ''

        note = selector.xpath("//div[@class=\"hatnote navigation-not-searchable\"]").xpath("string(.)").extract()
        items['note'] = self._first_clean(note)

        catalog = selector.xpath("//div[@class=\"toc\"]").xpath("string(.)").extract()
        items['catalog'] = self._first_clean(catalog)

        # Links used for iterative crawling: keep only relative /wiki/ hrefs.
        sub_urls = [unquote(item) for item in selector.xpath("//a[@title]/@href").extract()]
        items['keywords_url'] = list(set(filter(lambda x: 'wiki' in x and 'http' not in x, sub_urls)))

        description = selector.xpath("//div[@class=\"mw-parser-output\"]//p").xpath("string(.)").extract()
        items['description'] = self._clean(''.join(description)) if description else ''

        # Embedded picture URLs are parsed out of the raw HTML.
        items['embed_image_url'] = CacheTool.parse_wiki_pic(items['html'])

        # //*[@id="footer-info-lastmod"] holds the "last edited" footer line.
        update_time = selector.xpath("//*[@id=\"footer-info-lastmod\"]").xpath("string(.)").extract()
        items['update_time'] = self._clean(update_time[0].strip()) if update_time else ''

        # Reference sections appear under several different class names.
        rm_1 = selector.xpath(
            "//div[@class =\"refbegin columns references-column-count references-column-count-3\"]").xpath(
            "string(.)").extract()
        rm_2 = selector.xpath(
            "//div[@class =\"refbegin columns references-column-count references-column-count-2\"]").xpath(
            "string(.)").extract()
        rm_3 = selector.xpath(
            "//div[@class =\"reflist columns references-column-count references-column-count-2\"]").xpath(
            "string(.)").extract()
        rm_4 = selector.xpath("//ol[@class =\"references\"]").xpath("string(.)").extract()
        reference_material = list(chain(rm_1, rm_2, rm_3, rm_4))
        items['reference_material'] = self._clean(' '.join(reference_material)) if reference_material else ''

        item_tag = selector.xpath("//div[@id = \"mw-normal-catlinks\"]/ul").xpath("string(.)").extract()
        items['item_tag'] = self._first_clean(item_tag)

        # Deep-copy so downstream pipelines cannot mutate a shared item.
        yield copy.deepcopy(items)
| 2.125 | 2 |
fdap/app/contracts/service.py | miniyus/AutomaticPosting-Python | 0 | 12763567 | <filename>fdap/app/contracts/service.py
from fdap.app.contracts.logging import Logging
from fdap.app.exceptions.parse_exceptoin import ParseException
class Service(Logging):
    """Base class for services: inherits logging and adds an error helper."""

    def __init__(self):
        super().__init__()

    def throw(self, code: int, message: str):
        """Raise a ParseException carrying *code* and *message*."""
        raise ParseException(code, message)
| 2.15625 | 2 |
var/spack/repos/builtin/packages/ispc/package.py | tomdele/spack | 9 | 12763568 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import platform
from spack import *
class Ispc(Package):
    """ispc is a compiler for a variant of the C programming language, with
    extensions for single program, multiple data programming mainly aimed
    at CPU SIMD platforms."""

    homepage = "https://github.com/ispc/ispc/"
    url = "https://github.com/ispc/ispc/releases/download/v1.10.0/ispc-v1.10.0b-linux.tar.gz"

    # sha256 sums below are for the Linux binary tarballs; Darwin sums are
    # substituted at fetch time inside url_for_version().
    version('1.14.1', sha256='8cc0dae16b3ac244aa05e8b1db1dadf35aeb8d67916aaee6b66efb813b8e9174')
    version('1.13.0', sha256='8ab1189bd5db596b3eee9d9465d3528b6626a7250675d67102761bb0d284cd21')
    version('1.12.0', sha256='7a2bdd5fff5c1882639cfbd66bca31dbb68c7177f3013e80b0813a37fe0fdc23')
    version('1.11.0', sha256='dae7d1abf950dea722fe3c535e4fa43a29c0b67b14d66e566ab2fa760ee82f38')
    version('1.10.0', sha256='453211ade91c33826f4facb1336114831adbd35196d016e09d589a6ad8699aa3')

    def url_for_version(self, version):
        """Return the platform-specific binary-release URL for *version*.

        macOS archives are named 'macOS' from 1.11.0 on ('osx' before) and
        have their own checksums; Linux archives before 1.13.0 carry a
        trailing 'b' after the version tag.
        """
        url = "https://github.com/ispc/ispc/releases/download/v{0}/ispc-v{0}{2}-{1}.tar.gz"
        system = platform.system()
        if system == 'Darwin':
            checksums = {
                Version('1.14.1'): '50d5ba0268cd22a310eaf6ab4e00121bf83cc301396c6180e0fc1b897b40743c',
                Version('1.13.0'): '0dc7eaf3335b299e262f052fb96f0ad5a4e04a41492f399b690ca788e0fd304b',
                Version('1.12.0'): 'e6c917b964e43218c422b46c9a6c71b876d88d0791da2ee3732b20a2e209c018',
                Version('1.11.0'): '5205e0fca11361f8527d3489ee1503fd79ab8511db6399830c052ccf210cc3b7',
                Version('1.10.0'): '2b2e2499549ce09a6597b6b645e387953de84544ecb44307e7ee960c9b742a89'
            }
            # Override the recorded (Linux) checksum with the Darwin one
            # so the fetched macOS archive verifies correctly.
            self.versions[version] = {'checksum': checksums[version]}
            if self.spec.satisfies('@1.11.0:'):
                return url.format(version, 'macOS', '')
            else:
                return url.format(version, 'osx', '')
        else:  # linux
            if self.spec.satisfies('@1.13.0:'):
                suffix = ''
            else:
                suffix = 'b'
            return url.format(version, 'linux', suffix)

    def install(self, spec, prefix):
        # Binary release: just copy the shipped bin/ and examples/ trees.
        for d in ['bin', 'examples']:
            if os.path.isdir(d):
                install_tree(d, join_path(self.prefix, d))
| 1.828125 | 2 |
py_system/prototype/__init__.py | mingpz2010/PhysicsThinker | 1 | 12763569 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
The basic algorithm simulation archtecture of the C/C++ system
"""
import numpy as np
# Table of small primes (ascending, up to 3271) used by the simulation
# architecture.  NOTE(review): the exact consumer is not visible here.
PRIME= np.array([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
                 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
                 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
                 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
                 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
                 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
                 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
                 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
                 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
                 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
                 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013,
                 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091,
                 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181,
                 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277,
                 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361,
                 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451,
                 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531,
                 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609,
                 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699,
                 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789,
                 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889,
                 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,
                 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083,
                 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161,
                 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273,
                 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357,
                 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441,
                 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551,
                 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663,
                 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729,
                 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819,
                 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917,
                 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023,
                 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137,
                 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251,
                 3253, 3257, 3259, 3271]);

# NOTE(review): printing at import time is a module side effect —
# presumably a smoke test; consider removing or guarding with __main__.
print(PRIME[::-1])
| 2.96875 | 3 |
src/trainer/model_step_handler.py | renyi-ai/drfrankenstein | 4 | 12763570 | <reponame>renyi-ai/drfrankenstein<gh_stars>1-10
import os
import pandas as pd
import torch
class ModelStepHandler:
    """Bundles a model with its loss, optimizer and train/eval step logic.

    Responsibilities: running single optimization/evaluation steps,
    checkpointing weights, and accumulating a per-epoch training log.
    """

    def __init__(self,
                 model,
                 optimizer_name='sgd',
                 lr=1e-4,
                 multilabel=False,
                 weight_decay=0.):
        """
        :param model: project model exposing ``set_cuda()`` and ``device``
        :param optimizer_name: 'sgd' or 'adam'
        :param lr: learning rate
        :param multilabel: True for multi-label targets (e.g. celeba)
        :param weight_decay: L2 regularization factor
        """
        self.model = model
        self.optimizer_name = optimizer_name
        self.lr = lr
        self.multilabel = multilabel
        self.weight_decay = weight_decay
        self.loss = self._get_loss()
        self.optimizer = self._get_optimizer()
        self.stats = pd.DataFrame()  # accumulated per-epoch metrics

        # Put device on cuda if possible
        self.model.set_cuda()

    @classmethod
    def from_arg_config(cls, conf):
        """Build a handler from a parsed argument/config object."""
        from src.dataset import get_n_classes_and_channels
        from src.models import get_model
        n_classes, n_channels = get_n_classes_and_channels(conf.dataset)
        model = get_model(conf.model, n_classes, n_channels, conf.init_noise)
        multilabel = conf.dataset == 'celeba'
        return cls(model,
                   optimizer_name=conf.optimizer,
                   lr=conf.lr,
                   multilabel=multilabel,
                   weight_decay=conf.weight_decay)

    @classmethod
    def for_eval(cls, model_path):
        """Load a saved model for evaluation only (default optimizer settings)."""
        from src.models import load_from_path, get_info_from_path
        dataset = get_info_from_path(model_path)[1]
        multilabel = dataset == 'celeba'
        model = load_from_path(model_path)
        return cls(model, multilabel=multilabel)

    @property
    def device(self):
        ''' Cuda or CPU '''
        return self.model.device

    def save(self, n_iter_ran, save_folder):
        """Checkpoint the model weights as '<n_iter_ran>.pt' in *save_folder*."""
        os.makedirs(save_folder, exist_ok=True)
        filename = str(n_iter_ran) + '.pt'
        path = os.path.join(save_folder, filename)
        # Legacy serialization format kept for compatibility with older loaders.
        torch.save(self.model.state_dict(),
                   path,
                   _use_new_zipfile_serialization=False)

    def append_and_save_log(self, epoch_data, save_folder):
        """Append one epoch's metrics and rewrite the CSV training log."""
        # DataFrame.append was deprecated and removed in pandas 2.x;
        # pd.concat is the equivalent supported spelling.
        self.stats = pd.concat([self.stats, pd.DataFrame([epoch_data])],
                               ignore_index=True)
        save_path = os.path.join(save_folder, 'training_log.csv')
        self.stats.to_csv(save_path, index=False)

    def summarize(self, shape):
        """Best-effort printout of the model graph (optional dependency)."""
        try:
            from torchsummary import summary
            summary(self.model, shape)
        except Exception:
            print('Could not print out model graph. Skipping.')

    def train_step(self, inputs, labels):
        """One optimization step (forward + backward + parameter update)."""
        self.step(inputs, labels, is_train=True)

    def eval_step(self, inputs, labels):
        """One evaluation step (forward only, no gradient update)."""
        self.step(inputs, labels, is_train=False)

    def evaluate(self, inputs, labels=None):
        """Run inference on *inputs*; also return the loss when *labels* is given."""
        self.model.eval()
        need_loss = labels is not None
        inputs = inputs.to(self.device)
        with torch.set_grad_enabled(False):
            model_outputs = self.model(inputs)
            rounded_predictions = self._get_predictions(model_outputs)
            if need_loss:
                labels = labels.to(self.device)
                loss = self.loss(model_outputs, labels, inputs)
        self.model.train()
        if need_loss:
            return rounded_predictions, loss
        return rounded_predictions

    def step(self, inputs, labels, is_train):
        """Forward pass (and backward/update when *is_train*); return metrics."""
        inputs = inputs.to(self.device)
        labels = labels.to(self.device)
        self.optimizer.zero_grad()
        with torch.set_grad_enabled(is_train):
            model_outputs = self.model(inputs)
            rounded_predictions = self._get_predictions(model_outputs)
            loss = self.loss(model_outputs, labels, inputs)
            metrics = self._get_metrics(labels, rounded_predictions, loss)
            if is_train:
                loss.backward()
                self.optimizer.step()
        return metrics

    def _get_predictions(self, outputs):
        """Round raw model outputs into hard predictions."""
        if self.multilabel:
            # Independent sigmoid threshold at 0.5 per label.
            preds = torch.where(torch.nn.Sigmoid()(outputs) > 0.5,
                                torch.ones_like(outputs),
                                torch.zeros_like(outputs))
        else:
            preds = torch.max(outputs.data, 1)[1]
        return preds

    def _get_loss(self):
        """Return a loss callable matching the label type."""
        if self.multilabel:
            loss_function = torch.nn.BCEWithLogitsLoss()
        else:
            loss_function = torch.nn.CrossEntropyLoss()

        def mixed_loss(outputs, targets, inputs=None):
            # The input arg might be important for other loss calculation
            # so it's added as an optional argument here
            return loss_function(outputs, targets)

        return mixed_loss

    def _get_optimizer(self):
        """Instantiate the optimizer named by ``self.optimizer_name``."""
        if self.optimizer_name == "sgd":
            return torch.optim.SGD(self.model.parameters(),
                                   lr=self.lr,
                                   momentum=0.9,
                                   nesterov=True,
                                   weight_decay=self.weight_decay)
        elif self.optimizer_name == "adam":
            return torch.optim.Adam(self.model.parameters(),
                                    lr=self.lr,
                                    weight_decay=self.weight_decay)
        else:
            raise NotImplementedError(self.optimizer_name)

    def _get_metrics(self, labels, preds, loss):
        """Collect scalar metrics for one batch."""
        return {'loss': loss.item(),
                'accuracy': self._accuracy(labels, preds)}

    def _accuracy(self, labels, preds):
        """Fraction of elements where prediction equals label (0..1 tensor)."""
        equality = torch.sum(labels == preds, dtype=torch.float32)
        return equality / labels.nelement()

    def _get_save_subfolder(self, data_name):
        # NOTE(review): this references self.model_trainer, self.gradient_noise
        # and self.save_folder, none of which are set anywhere in this class —
        # it looks copied from a trainer class and will raise AttributeError
        # if called; confirm before use.
        in_folder = 'in' + str(self.model_trainer.model.seed)
        gn_folder = 'gn' + str(self.gradient_noise)
        initfolder = in_folder + '-' + gn_folder
        folder = os.path.join(self.save_folder, self.model.name, data_name,
                              initfolder)
        os.makedirs(folder, exist_ok=True)
        return folder
| 2.296875 | 2 |
run.py | davidkiama/Passowrd-Locker | 0 | 12763571 | #!/usr/bin/env python3
import random
from user import User
from credentials import Credentials
def create_user(username, password):
    """Create and return a new User with the given credentials."""
    return User(username, password)
def check_password(password):
    """
    Delegate password verification to the User class.

    NOTE(review): User.check_password is called on the *class* with a single
    argument — confirm it is defined as a class/static method in user.py.
    """
    return User.check_password(password)
def ask_password():
    """Prompt on stdout and return the password typed by the user."""
    print("Enter User password:")
    return input()
def generate_password():
    '''
    Generate a random 12-character password.

    Three rounds each append one uppercase letter, one lowercase letter,
    one digit and one symbol, so every class is represented.
    '''
    uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    lowercase = 'abcdefghijklmnopqrstuvwxyz'
    # BUG FIX: the digit alphabet had been replaced by a '<PASSWORD>'
    # placeholder; restore the actual digits.
    digits = '0123456789'
    symbols = '!@#$%&*'

    password_list = []
    for _ in range(3):
        password_list.append(random.choice(uppercase))
        password_list.append(random.choice(lowercase))
        password_list.append(random.choice(digits))
        password_list.append(random.choice(symbols))
    return ''.join(password_list)
def create_credential(account, password):
    """Create and return a new Credentials entry for *account*."""
    return Credentials(account, password)
def save_credential(credential):
    '''
    Persist *credential* into the credentials list.
    '''
    credential.save_credential()
def display_credentials():
    '''
    Return all saved credentials (delegates to the Credentials class).
    '''
    return Credentials.display_credentials()
def find_credential(account):
    '''
    Look up and return the credential stored under *account*.
    '''
    return Credentials.find_by_account(account)
def delete_credential(credential):
    '''
    Remove *credential* from the stored credentials list.
    '''
    credential.delete_credential()
def main():
    """
    Interactive loop of the Password Locker app: sign in, then dispatch
    on the short codes sc/dc/cc/del/exit until the user quits.
    """
    print("Welcome to Password Locker App")
    print("What is your name?")
    username = input()
    print("Enter your password:")
    password = input()
    user = create_user(username, password)
    print(f"Hello {user.username}. what would you like to do?")
    print('\n')

    while True:
        print("""Use these short codes :
         sc - Store existing credentials,
         dc - Display credentials,
         cc - Create a new credential,
         del- delete a credential,
         exit -exit the contact list """)
        short_code = input().lower()

        if short_code == "sc":
            # Store an existing account/password pair.
            print("Enter your account name:")
            account = input()
            print("Enter your password:")
            password = input()
            save_credential(create_credential(account, password))
            print('\n')
        elif short_code == "cc":
            # Create a new credential, optionally with a generated password.
            print("Enter your account name:")
            account = input()
            print("Would you like to use a suggested password: (y/n)")
            pwd_option = input().lower()
            if pwd_option == "y":
                password = generate_password()
                print("Your password is: " + password)
            else:
                print("Enter your preferred password:")
                password = input()
            save_credential(create_credential(account, password))
            print('\n')
        elif short_code == "dc":
            # BUG FIX: the master-password prompt call had been mangled
            # ('<PASSWORD>()'); restore the ask_password() call.
            user_password = ask_password()
            if user.check_password(user_password):
                if display_credentials():
                    print("Here is a list of all your credentials")
                    print('\n')
                    for credential in display_credentials():
                        print(f"{credential.account} {credential.password}")
                    print('\n')
                else:
                    print('\n')
                    print("You dont seem to have any credentials saved yet")
                    print('\n')
        elif short_code == "del":
            # BUG FIX: same mangled call as above; restore ask_password().
            user_password = ask_password()
            if user.check_password(user_password):
                print("Enter the account name you want to delete:")
                account = input()
                if find_credential(account):
                    delete_credential(find_credential(account))
                    print("Credential has been deleted")
                    print('\n')
                else:
                    print("That credential does not exist")
                    print('\n')
        elif short_code == "exit":
            print("Logging out .......")
            break
        else:
            print("Please use the short codes provided")
# Script entry point.
if __name__ == '__main__':
    main()
| 4.375 | 4 |
es.py | elliotxx/es-handle | 6 | 12763572 | #coding=utf8
'''
使用 python client 对 ElasticSearch 进行常用的增删改查操作
'''
from elasticsearch import Elasticsearch
import sys
import json
# Positional-argument names, in the order they appear on the command line.
argname_list = ['cmd','addr','op','index','type','id']
default_host = '127.0.0.1'
default_port = '9200'
# Valid range of the argument count: [Argument_Num_Min_Limit, Argument_Num_Max_Limit]
Argument_Num_Min_Limit = 2
Argument_Num_Max_Limit = 6
# Encoding info (shell encodings and the source-file encoding).
input_encoding = sys.stdin.encoding
output_encoding = sys.stdout.encoding
file_encoding = 'utf8'
def printx(s, end = '\n'):
    '''General-purpose printer (Python 2): re-encodes str/dict for the console.'''
    if isinstance(s,str):
        # Python 2 str is bytes: decode from the file encoding, then
        # re-encode for the console before writing.
        s = s.decode(file_encoding)
        s += end
        s = s.encode(output_encoding)
        sys.stdout.write(s)
    elif isinstance(s,dict):
        # Pretty-print dicts as JSON, keeping non-ASCII characters readable.
        s = json.dumps(s, indent=4, ensure_ascii=False)
        s += end
        s = s.encode(output_encoding)
        sys.stdout.write(s)
    else:
        print s
def getArguments():
    '''Parse sys.argv into an argument dict (host/port resolved from addr).'''
    args = {'op':None}
    args_num = len(sys.argv)
    # Map positional arguments onto their names; '-h'/'--help' stops
    # parsing and records which argument help was requested for.
    for i in range(1,args_num):
        if sys.argv[i] == '-h' or sys.argv[i] == '--help':
            args['help'] = argname_list[i-1]
            break
        args[argname_list[i]] = sys.argv[i]
    # Basic validation of the argument count.
    if not (Argument_Num_Min_Limit <= args_num <= Argument_Num_Max_Limit):
        raise Exception,'参数错误:提供了错误的参数个数'
    # Resolve the addr argument into host/port.
    if not args.has_key('addr'):
        # No address supplied: use the default host and port.
        args['host'] = default_host
        args['port'] = default_port
    elif args['addr'].find(':')==-1:
        # Only a host supplied: use the default port.
        args['host'] = args['addr']
        args['port'] = default_port
    else:
        # Both host and port supplied.
        args['host'],args['port'] = args['addr'].split(':')
    return args
def connElasticsearch(args):
    '''Connect to ElasticSearch and verify the connection is alive.'''
    es = Elasticsearch(['%s:%s'%(args['host'],args['port'])])
    try:
        # Probe the cluster; raises if unreachable.
        es.info()
    except Exception,e:
        raise Exception,'ElasticSearch <%s:%s> 连接失败!'%(args['host'],args['port'])
    return es
def getBody():
    '''Read a JSON request body from stdin; input ends after two blank lines.'''
    blank_line_num = 0
    body = ''
    while(blank_line_num < 2):
        s = raw_input('...')
        body += s + '\n'
        blank_line_num = blank_line_num + 1 if s=='' else 0
    # Normalize single quotes to double quotes so json.loads accepts it.
    body = body.strip().replace("'","\"").decode(input_encoding)
    body = json.loads(body)
    return body
def Insert(es,args):
    '''Insert data into ElasticSearch (document with/without id, or new index).'''
    if args.has_key('index'):
        if args.has_key('type'):
            printx('请指定文档内容(JSON格式):')
            args['body'] = getBody()
            if args.has_key('id'):
                # Given index, type, id: index a document under that id.
                res = es.index(index = args['index'], doc_type = args['type'], id = args['id'], body = args['body'])
            else:
                # Given index, type: index a document with an auto-generated id.
                res = es.index(index = args['index'], doc_type = args['type'], body = args['body'])
        else:
            # Given index only: create the index from supplied settings/mappings.
            printx('请指定待创建 index 的 settings 和 mappings(JSON格式):')
            args['body'] = getBody()
            res = es.indices.create(index = args['index'], body = args['body'])
        printx('插入结果:')
        printx(res)
    else:
        # No target supplied at all.
        raise Exception,'参数错误:没有插入对象'
def Delete(es,args):
    '''Delete data from ElasticSearch (document, whole type, or whole index).'''
    if args.has_key('index'):
        if args.has_key('type'):
            if args.has_key('id'):
                # Given index, type, id: delete a single document.
                res = es.delete(index = args['index'], doc_type = args['type'], id = args['id'])
            else:
                # Given index, type: delete every document of the type
                # (runs asynchronously for large types).
                res = es.delete_by_query(index = args['index'], doc_type = args['type'], body={"query":{"match_all":{}}})
        else:
            # Given index only: drop the whole index.
            res = es.indices.delete(index = args['index'])
        printx('删除结果:')
        printx(res)
    else:
        # No target supplied at all.
        raise Exception,'参数错误:没有删除对象'
def Update(es,args):
    '''Update a document in ElasticSearch (requires index, type and id).'''
    if args.has_key('index'):
        if args.has_key('type'):
            if args.has_key('id'):
                # Given index, type, id: update that document.
                printx('请指定更新内容(JSON格式):')
                args['body'] = getBody()
                res = es.update(index = args['index'], doc_type = args['type'], id = args['id'], body = args['body'])
                printx('更新结果:')
                printx(res)
            else:
                # index + type without an id is not enough for an update.
                raise Exception,'参数错误:除了“索引”名和“类型”名,您还需要指定文档的“id”'
        else:
            # index alone is not enough for an update.
            raise Exception,'参数错误:除了“索引”名,您还需要指定文档的“类型”名和“id”'
    else:
        # No target supplied at all.
        raise Exception,'参数错误:没有更新对象'
def Search(es,args):
    '''Query ElasticSearch (document, type, index, or all indices).'''
    if args.has_key('index'):
        if args.has_key('type'):
            if args.has_key('id'):
                # Given index, type, id: fetch a single document.
                res = es.get(index = args['index'], doc_type = args['type'], id = args['id'])
            else:
                # Given index, type: match all documents of the type.
                res = es.search(index = args['index'], doc_type = args['type'], body={"query":{"match_all":{}}})
        else:
            # Given index only: match all documents of the index.
            res = es.search(index = args['index'], body={"query":{"match_all":{}}})
    else:
        # No arguments: brief results across all indices.
        res = es.search(body={"query":{"match_all":{}}})
    printx('查询结果:')
    printx(res)
def Cat(es,args):
    '''Show ElasticSearch status (currently: the index listing).'''
    res = es.cat.indices(v=True)
    printx('查看结果:')
    printx(res)
def Info(es,args):
    '''Print ElasticSearch connection information.'''
    printx('Elasticsearch <%s:%s> 连接信息:'%(args['host'],args['port']))
    printx(es.info())
def Help(args):
    '''Print usage text and examples for the requested argument/operation.'''
    # Static help texts: 'basic' usage, per-'option' descriptions, and
    # per-topic 'example' snippets (user-facing strings kept verbatim).
    h = {
        'basic' :
'''命令格式:
    es.py [-h] IP[:port] [-h|option] [-h|index] [type] [id]
''',
        'option' :{
            'insert':
'''insert - 向 ElasticSearch 插入数据
    支持 插入指定id的文档、插入不指定id的文档、仅创建 index 三种格式
''',
            'delete':
'''delete - 从 ElasticSearch 删除数据
    支持 删除文档、删除整个类型(type)、删除整个索引(index) 三种格式
    注意:如果类型中数据过多,删除操作会异步进行
''',
            'update':
'''update - 更新指定 ElasticSearch 文档内容
    支持 更新指定id的文档内容 一种格式
    注意:更新的内容应包含在 "doc" 关键字中,例:
        es.py localhost update test_index test_type 1
        {
            "doc":{
                "content" : "hello world"
            }
        }
        如此,索引 test_index 的类型 test_type 中 id 为 1 的文档的
        content 字段内容更新为"hello world"
''',
            'search':
'''search - 查询 ElasticSearch 指定内容
    支持 查询指定id的文档内容、查询指定type、查询指定index、
    查询所有index 四种格式
''',
            'cat':
'''cat - 查看 ElasticSearch 指定状态
    默认查看当前所有索引
'''
        },
        'example':{
            'addr' :
'''# 查看 ElasticSearch 连接状态
es.py localhost
''',
            'insert' :
'''# 增(insert)
# 1. 插入指定 id 的文档
es.py localhost:9200 insert test_index test_type 1
{
    "title" : "Good morning",
    "content" : "hello"
}
# 2. 插入不指定 id 的文档
es.py localhost insert test_index test_type
输入同上...
# 3. 创建 index
es.py localhost insert test_index_2
{
    "settings" : {
        "number_of_shards" : 1
    },
    "mappings" : {
        "test_type_2" : {
            "properties" : {
                "title" : { "type" : "text" },
                "content" : { "type" : "text" }
            }
        }
    }
}
''',
            'delete' :
'''# 删(delete)
# 1. 删除指定 id 的文档
es.py localhost delete test_index test_type 1
# 2. 删除整个类型(type)
es.py localhost delete test_index test_type
# 3. 删除整个索引(index)
es.py localhost delete test_index
''',
            'update' :
'''# 改(update)
# 1. 更新指定id的文档内容(更新的内容应包含在 "doc" 关键字中)
es.py localhost update test_index test_type 1
{
    "doc": {
        "content" : "hello world"
    }
}
''',
            'search' :
'''# 查(search)
# 1. 查询指定id的文档内容
es.py localhost search test_index test_type 1
# 2. 查询指定type
es.py localhost search test_index test_type
# 3. 查询指定index
es.py localhost search test_index
# 4. 查询所有index
es.py localhost search
''',
            'cat' :
'''# 看(cat)
# 1. 查看 ElasticSearch 所有索引
es.py localhost cat
'''
        }
    }
    # Fixed display order for option/example entries.
    key_sort = ['addr','insert','delete','update','search','cat']
    printx(h['basic'])
    if args['help'] == 'cmd':
        # Help on the command itself: dump every option and every example.
        printx('Option:')
        printx(''.join(map(lambda x:h['option'][x] if h['option'].has_key(x) else '',key_sort)))
        printx('例子:')
        printx('\n'.join(map(lambda x:h['example'][x] if h['example'].has_key(x) else '',key_sort)))
    elif args['help'] == 'addr':
        printx('例子:')
        printx(h['example']['addr'])
    elif args['help'] == 'op':
        # Help for one specific operation.
        printx('Option:')
        printx(h['option'][args['op']])
        printx('例子:')
        printx(h['example'][args['op']])
    else:
        raise Exception,'无法查看 "%s" 的帮助信息'%args[args['help']]
def main():
    '''Entry point: parse arguments, then print help or dispatch an operation.'''
    # Parse command-line arguments.
    args = getArguments()
    # Print help if it was requested.
    if args.has_key('help'):
        Help(args)
    else:
        # Try connecting to ElasticSearch.
        es = connElasticsearch(args)
        # Dispatch the CRUD / status operation.
        if args['op'] == 'insert':
            Insert(es,args)
        elif args['op'] == 'delete':
            Delete(es,args)
        elif args['op'] == 'update':
            Update(es,args)
        elif args['op'] == 'search':
            Search(es,args)
        elif args['op'] == 'cat':
            Cat(es,args)
        elif args['op'] == None:
            Info(es,args)
        else:
            raise Exception,'无法识别该操作 --> "%s"'%args['op']
if __name__=='__main__':
    # main()
    # Top-level boundary: surface any error through printx so console
    # encodings stay consistent.
    try:
        main()
    except Exception,e:
        printx('[ERROR]: ',end='')
        printx(str(e))
| 2.921875 | 3 |
dci/alembic/versions/732a3e25e65e_remove_topics_label_column.py | redhat-cip/dci-control-server | 17 | 12763573 | <filename>dci/alembic/versions/732a3e25e65e_remove_topics_label_column.py
#
# Copyright (C) 2018 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""remove topics.label column
Revision ID: <PASSWORD>
Revises: <PASSWORD>
Create Date: 2018-12-13 10:55:36.509774
"""
# revision identifiers, used by Alembic.
# NOTE(review): the revision ids below look redacted/placeholder —
# restore the real revision hashes before running this migration.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None

from alembic import op
def upgrade():
    """Drop the obsolete 'label' column from the 'topics' table."""
    op.drop_column('topics', 'label')
def downgrade():
    # NOTE(review): irreversible as written — downgrading will not restore
    # the 'topics.label' column (its column definition is not recorded here).
    pass
| 1.335938 | 1 |
BAIT2123 Internet Of Things/Practical/Practical 7/test09.py | loozixuan/SoftwareSystemsDevelopment-Y2S1 | 3 | 12763574 | <reponame>loozixuan/SoftwareSystemsDevelopment-Y2S1<filename>BAIT2123 Internet Of Things/Practical/Practical 7/test09.py
from time import *
from grovepi import *
from paho.mqtt import publish
# Broker address on the local network; swap in the public broker below
# to act as a publisher without a local broker.
MQTT_BROKER = "192.168.56.1"
#MQTT_BROKER = "broker.emqx.io" #using public mqtt broker to act as publisher
MQTT_TOPIC = "HOME/KITCHEN"
# Prompt for a payload and publish it once to the kitchen topic.
data = input("Message Payload: ")
publish.single(MQTT_TOPIC, data, hostname=MQTT_BROKER)
tasks.py | MinchinWeb/gpx-reader | 1 | 12763575 | from minchin.releaser import make_release, vendorize
| 1.132813 | 1 |
metaheuristic_algorithms/hsgo.py | thieu1995/mpoc_code | 0 | 12763576 | <filename>metaheuristic_algorithms/hsgo.py
import numpy as np
from copy import deepcopy
from includes.utils import *
from blockchain_network.simulation import Simulator
class HsgoEngine:
    """Henry gas solubility optimization (HGSO) over blockchain-simulation
    parameter vectors.  Fitness is *minimized* throughout (team bests are
    the lowest-fitness agents and the global best is updated with ``<``).
    """

    def __init__(self, population_size=50, n_clusters=5, epochs=500,
                 num_simulation_each_solution=2, n_value=None):
        """
        :param population_size: total number of agents in the swarm
        :param n_clusters: number of gas types (clusters)
        :param epochs: number of optimization iterations
        :param num_simulation_each_solution: simulations averaged per fitness call
        :param n_value: scenario selector — 75, 100, 125 or 'all'
        """
        self.element_length = 10  # parameters per candidate solution
        self.population_size = population_size
        self.n_clusters = n_clusters
        self.epochs = epochs
        self.num_simulation_each_solution = num_simulation_each_solution
        self.n_value = n_value
        self.n_elements = int(self.population_size / self.n_clusters)

    def compute_fitness(self, solution):
        '''
        Average blockchain-simulation result for *solution* over the
        scenario set selected by ``self.n_value`` (lower is better).
        '''
        if self.n_value == 75:
            scenario = [[100, 200, 21, 20, 75]]
        elif self.n_value == 100:
            scenario = [[100, 200, 21, 20, 100]]
        elif self.n_value == 125:
            scenario = [[100, 200, 21, 20, 125]]
        elif self.n_value == 'all':
            scenario = [[100, 200, 21, 20, 75], [100, 200, 21, 20, 100], [100, 200, 21, 20, 125]]
        else:
            # Previously an unrecognized n_value crashed later with a
            # NameError on 'scenario'; fail fast with a clear message.
            raise ValueError('unsupported n_value: %r' % (self.n_value,))
        fitness = 0
        for _scenario in scenario:
            for _ in range(self.num_simulation_each_solution):
                simulator = Simulator(solution, _scenario[0], _scenario[1],
                                      _scenario[2], _scenario[3], _scenario[4])
                fitness += simulator.simulate_mdpos()
        fitness /= (self.num_simulation_each_solution * len(scenario))
        return fitness

    def create_population(self):
        """Create the initial swarm.

        Returns ``(pop, group)``: ``pop`` is the flat agent list and
        ``group`` the same agents arranged per cluster.  Each agent is
        ``[solution, fitness, cluster_index]``.
        """
        pop = []
        group = []
        for i in range(self.n_clusters):
            team = []
            for j in range(self.n_elements):
                solution = random_parameter_combination(self.element_length)
                fitness = self.compute_fitness(solution)
                team.append([solution, fitness, i])
                pop.append([solution, fitness, i])
            group.append(team)
        return pop, group

    def _get_best_solution_in_team(self, group=None):
        """Return a copy of the best (lowest-fitness) agent of every cluster."""
        list_best = []
        for team in group:
            sorted_team = sorted(team, key=lambda agent: agent[1])
            list_best.append(deepcopy(sorted_team[0]))
        return list_best

    def evolve(self):
        """Run HGSO for ``self.epochs`` iterations.

        Returns ``(best_solution, loss_history)`` where loss_history is the
        best fitness seen after each epoch.
        """
        # Physical constants / hyper-parameters of the HGSO update rules.
        T0 = 298.15
        K = 1.0
        beta = 1.0
        alpha = 1
        epxilon = 0.05
        l1 = 5E-2
        l2 = 100.0
        l3 = 1E-2
        H_j = l1 * np.random.uniform()   # Henry's coefficient
        P_ij = l2 * np.random.uniform()  # partial pressure
        C_j = l3 * np.random.uniform()   # temperature constant of Eq. 8

        pop, group = self.create_population()
        # BUG FIX: fitness is minimized (see the '<' comparison below and
        # _get_best_solution_in_team), so the initial global best must be
        # the minimum — the original used max() here (its own comment
        # flagged '??? Need check max').
        g_best = min(pop, key=lambda x: x[1])
        p_best = self._get_best_solution_in_team(group)
        loss_train = []
        for epoch in range(self.epochs):
            # Loop over clusters (gas types) ...
            for i in range(self.n_clusters):
                # ... and over the agents of each cluster.
                for j in range(self.n_elements):
                    F = -1.0 if np.random.uniform() < 0.5 else 1.0
                    # Henry coefficient and solubility updates (Eq. 8, 9, 10).
                    H_j = H_j * np.exp(-C_j * (1.0/np.exp(-epoch/self.epochs) - 1.0/T0))
                    S_ij = K * H_j * P_ij
                    gama = beta * np.exp(- ((p_best[i][1] + epxilon) / (group[i][j][1] + epxilon)))
                    X_ij = group[i][j][0] + F * np.random.uniform() * gama * (np.array(p_best[i][0]) - np.array(group[i][j][0])) + \
                        F * np.random.uniform() * alpha * np.array((S_ij * np.array(g_best[0]) - group[i][j][0]))
                    fit = self.compute_fitness(X_ij)
                    group[i][j] = [X_ij, fit, i]
                    pop[i * self.n_elements + j] = [X_ij, fit, i]
            # Update Henry's coefficient (Eq. 8) and solubility (Eq. 9).
            H_j = H_j * np.exp(-C_j * (1.0 / np.exp(-epoch / self.epochs) - 1.0 / T0))
            S_ij = K * H_j * P_ij
            # Rank and select a number of worst agents (Eq. 11) ...
            N_w = int(self.population_size * (np.random.uniform(0, 0.1) + 0.1))
            # ... and re-randomize their positions (Eq. 12).
            sorted_id_pos = np.argsort([x[1] for x in pop])
            # NOTE(review): argsort is ascending, so taking the first N_w
            # entries replaces the *lowest*-fitness (i.e. best) agents;
            # under minimization the worst agents are at the end of
            # sorted_id_pos — confirm the intended direction.
            for item in range(N_w):
                agent_id = sorted_id_pos[item]
                j = agent_id % self.n_elements
                i = int((agent_id - j) / self.n_elements)
                X_new = np.random.uniform(0, 1, self.element_length)
                fit = self.compute_fitness(X_new)
                pop[agent_id] = [X_new, fit, i]
                group[i][j] = [X_new, fit, i]
            p_best = self._get_best_solution_in_team(group)
            current_best = min(pop, key=lambda x: x[1])
            if current_best[1] < g_best[1]:
                g_best = deepcopy(current_best)
            loss_train.append(g_best[1])
            print("Generation : {0}, best result so far: {1}".format(epoch + 1, g_best[1]))
        print('=== results ===')
        print(g_best[0])
        print(loss_train)
        print('====')
        return g_best[0], np.array(loss_train)
| 2.65625 | 3 |
tests/__init__.py | paulross/typin | 7 | 12763577 | # -*- coding: utf-8 -*-
"""Unit test package for typin."""
| 1.023438 | 1 |
doc_v3/apps.py | julics129/clinic_v3 | 0 | 12763578 | from django.apps import AppConfig
class DocV3Config(AppConfig):
    """Django application configuration for the doc_v3 app."""
    name = 'doc_v3'
var/spack/repos/builtin/packages/cray-fftw/package.py | varioustoxins/spack | 0 | 12763579 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CrayFftw(Package):
    """FFTW is a C subroutine library for computing the discrete Fourier
    transform (DFT) in one or more dimensions, of arbitrary input
    size, and of both real and complex data (as well as of even/odd
    data, i.e. the discrete cosine/sine transforms or DCT/DST).
    This package is a wrapper for Cray's version of FFTW.
    To install this package, list it as an external package in packages.yaml,
    and make sure to load the correct cray-fftw module. In some cases you
    need to load cray-mpich before cray-fftw.
    """
    homepage = "https://docs.nersc.gov/development/libraries/fftw/"
    has_code = False  # Skip attempts to fetch source that is not available
    maintainers = ['haampie']
    version('3.3.8.8')
    version('3.3.8.7')
    provides('fftw-api@3')
    variant(
        'precision', values=any_combination_of(
            'float', 'double'
        ).prohibit_empty_set().with_default('float,double'),
        description='Build the selected floating-point precision libraries'
    )
    variant('openmp', default=False, description="Enable OpenMP support.")
    variant('mpi', default=True, description='Activate MPI support')
    depends_on('mpi', when='+mpi')

    def install(self, spec, prefix):
        # This package only wraps a vendor-provided installation, so trying
        # to build from source is always an error.
        raise InstallError(
            self.spec.format('{name} is not installable, you need to specify '
                             'it as an external package in packages.yaml'))

    @property
    def libs(self):
        """Libraries matching the spec query (precision / mpi / openmp)."""
        # Reduce repetitions of entries
        # NOTE(review): assumes ``llnl`` is brought into scope by
        # ``from spack import *`` -- verify for the spack version in use.
        query_parameters = list(llnl.util.lang.dedupe(
            self.spec.last_query.extra_parameters
        ))
        # List of all the suffixes associated with float precisions
        precisions = [
            ('float', 'f'),
            ('double', ''),
        ]
        # Retrieve the correct suffixes, or use double as a default
        suffixes = [v for k, v in precisions if k in query_parameters] or ['']
        # Construct the list of libraries that needs to be found
        libraries = []
        for sfx in suffixes:
            if 'mpi' in query_parameters and '+mpi' in self.spec:
                libraries.append('libfftw3' + sfx + '_mpi')
            if 'openmp' in query_parameters and '+openmp' in self.spec:
                libraries.append('libfftw3' + sfx + '_omp')
            libraries.append('libfftw3' + sfx)
        return find_libraries(libraries, root=self.prefix, recursive=True)
| 2.03125 | 2 |
nineml/visitors/equality.py | INCF/nineml-python | 6 | 12763580 | from builtins import zip
import math
import sympy
from itertools import chain
from .base import BaseVisitor, BaseDualVisitor, DualWithContextMixin
from nineml.exceptions import (NineMLDualVisitException,
NineMLDualVisitValueException,
NineMLDualVisitTypeException,
NineMLDualVisitKeysMismatchException,
NineMLDualVisitNoneChildException,
NineMLNotBoundException,
NineMLDualVisitAnnotationsMismatchException,
NineMLNameError)
NEARLY_EQUAL_PLACES_DEFAULT = 15
class EqualityChecker(BaseDualVisitor):
    """Dual visitor that tests two 9ML object trees for equivalence.

    'rhs' expressions are compared symbolically via sympy, floats are
    compared up to ``nearly_equal_places`` decimal places of the mantissa,
    and annotations are only compared for namespaces in ``annotations_ns``.
    Mismatches surface as ``NineMLDualVisitException``, which :meth:`check`
    converts into a boolean.
    """

    def __init__(self, annotations_ns=None, check_urls=True,
                 nearly_equal_places=NEARLY_EQUAL_PLACES_DEFAULT, **kwargs):  # @UnusedVariable @IgnorePep8
        super(EqualityChecker, self).__init__(**kwargs)
        # Fix: the previous mutable default ``annotations_ns=[]`` shared a
        # single list object between all instances created with the default;
        # use a None sentinel instead (behaviour is otherwise unchanged).
        self.annotations_ns = [] if annotations_ns is None else annotations_ns
        self.check_urls = check_urls
        self.nearly_equal_places = nearly_equal_places

    def check(self, obj1, obj2, **kwargs):
        """Return True if *obj1* and *obj2* are equivalent, else False."""
        try:
            self.visit(obj1, obj2, **kwargs)
        except NineMLDualVisitException:
            return False
        return True

    def action(self, obj1, obj2, nineml_cls, **kwargs):
        """Compare annotations in the selected namespaces (when both
        objects carry annotations) before delegating to the base action."""
        if self.annotations_ns:
            try:
                annotations_keys = set(chain(obj1.annotations.branch_keys,
                                             obj2.annotations.branch_keys))
                skip_annotations = False
            except AttributeError:
                # At least one object exposes no annotations attribute.
                skip_annotations = True
            if not skip_annotations:
                for key in annotations_keys:
                    if key[1] in self.annotations_ns:
                        try:
                            annot1 = obj1.annotations.branch(key)
                        except NineMLNameError:
                            self._raise_annotations_exception(
                                nineml_cls, obj1, obj2, key)
                        try:
                            annot2 = obj2.annotations.branch(key)
                        except NineMLNameError:
                            self._raise_annotations_exception(
                                nineml_cls, obj1, obj2, key)
                        self.visit(annot1, annot2, **kwargs)
        return super(EqualityChecker, self).action(obj1, obj2, nineml_cls,
                                                   **kwargs)

    def default_action(self, obj1, obj2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        """Compare every declared nineml attribute of the two objects."""
        for attr_name in nineml_cls.nineml_attr:
            if attr_name == 'rhs':  # need to use Sympy equality checking
                self._check_rhs(obj1, obj2, nineml_cls)
            else:
                self._check_attr(obj1, obj2, attr_name, nineml_cls)

    def action_reference(self, ref1, ref2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        if self.check_urls:
            self._check_attr(ref1, ref2, 'url', nineml_cls)

    def action_definition(self, def1, def2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        if self.check_urls:
            self._check_attr(def1, def2, 'url', nineml_cls)

    def action_singlevalue(self, val1, val2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        if self._not_nearly_equal(val1.value, val2.value):
            self._raise_value_exception('value', val1, val2, nineml_cls)

    def action_arrayvalue(self, val1, val2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        if len(val1.values) != len(val2.values):
            self._raise_value_exception('values', val1, val2, nineml_cls)
        if any(self._not_nearly_equal(s, o)
               for s, o in zip(val1.values, val2.values)):
            self._raise_value_exception('values', val1, val2, nineml_cls)

    def action_unit(self, unit1, unit2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        # Ignore name
        self._check_attr(unit1, unit2, 'power', nineml_cls)
        self._check_attr(unit1, unit2, 'offset', nineml_cls)

    def action_dimension(self, dim1, dim2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        # Ignore name
        for sym in nineml_cls.dimension_symbols:
            self._check_attr(dim1, dim2, sym, nineml_cls)

    def action__annotationsbranch(self, branch1, branch2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        for attr in nineml_cls.nineml_attr:
            if attr != 'abs_index':
                self._check_attr(branch1, branch2, attr, nineml_cls)

    def _check_rhs(self, expr1, expr2, nineml_cls):
        """Compare two expressions symbolically: expand the difference, or
        fall back to boolean equivalence when subtraction is undefined."""
        try:
            expr_eq = (sympy.expand(expr1.rhs - expr2.rhs) == 0)
        except TypeError:
            expr_eq = sympy.Equivalent(expr1.rhs, expr2.rhs) == sympy.true
        if not expr_eq:
            self._raise_value_exception('rhs', expr1, expr2, nineml_cls)

    def _check_attr(self, obj1, obj2, attr_name, nineml_cls):
        # Attributes that are not bound on either side compare as None.
        try:
            attr1 = getattr(obj1, attr_name)
        except NineMLNotBoundException:
            attr1 = None
        try:
            attr2 = getattr(obj2, attr_name)
        except NineMLNotBoundException:
            attr2 = None
        if attr1 != attr2:
            self._raise_value_exception(attr_name, obj1, obj2, nineml_cls)

    def _raise_annotations_exception(self, nineml_cls, obj1, obj2, key):
        # Subclasses (e.g. MismatchFinder) override this to attach context.
        raise NineMLDualVisitException()

    def _raise_value_exception(self, attr_name, obj1, obj2, nineml_cls):
        raise NineMLDualVisitException()

    def _not_nearly_equal(self, float1, float2):
        """
        Determines whether two floating point numbers are nearly equal (to
        within reasonable rounding errors
        """
        mantissa1, exp1 = math.frexp(float1)
        mantissa2, exp2 = math.frexp(float2)
        return not ((round(mantissa1, self.nearly_equal_places) ==
                     round(mantissa2, self.nearly_equal_places)) and
                    exp1 == exp2)
class Hasher(BaseVisitor):
    """Visitor that reduces a 9ML object tree to a single integer hash.

    Floats are rounded to ``nearly_equal_places`` decimal places of their
    mantissa before hashing, mirroring the tolerance that EqualityChecker
    uses, so that nearly-equal objects hash alike.
    """
    # Mixing constant for combining partial hashes (boost::hash_combine
    # style); see _hash_attr.
    seed = 0x9e3779b97f4a7c17

    def __init__(self, nearly_equal_places=NEARLY_EQUAL_PLACES_DEFAULT,
                 **kwargs):  # @UnusedVariable @IgnorePep8
        super(Hasher, self).__init__(**kwargs)
        self.nearly_equal_places = nearly_equal_places

    def hash(self, nineml_obj):
        """Visit *nineml_obj* and return the accumulated hash value."""
        self._hash = None
        self.visit(nineml_obj)
        return self._hash

    def default_action(self, obj, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        """Hash every declared attribute; unbound attributes are skipped."""
        for attr_name in nineml_cls.nineml_attr:
            try:
                if attr_name == 'rhs':  # need to use Sympy equality checking
                    self._hash_rhs(obj.rhs)
                else:
                    self._hash_attr(getattr(obj, attr_name))
            except NineMLNotBoundException:
                continue

    def _hash_attr(self, attr):
        """Fold ``hash(attr)`` into the running hash value."""
        attr_hash = hash(attr)
        if self._hash is None:
            self._hash = attr_hash
        else:
            # The rationale behind this equation is explained here
            # https://stackoverflow.com/questions/5889238/why-is-xor-the-default-way-to-combine-hashes
            self._hash ^= (attr_hash + self.seed + (self._hash << 6) +
                           (self._hash >> 2))

    def action_reference(self, ref, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        self._hash_attr(ref.url)

    def action_definition(self, defn, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        self._hash_attr(defn.url)

    def action_singlevalue(self, val, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        self._hash_value(val.value)

    def action_arrayvalue(self, val, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        for v in val.values:
            self._hash_value(v)

    def _hash_rhs(self, rhs, **kwargs):  # @UnusedVariable
        """Hash an expression, normalising via sympy expansion when possible."""
        try:
            rhs = sympy.expand(rhs)
        except Exception:
            # Bug fix: this was a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit; fall back to hashing the
            # unexpanded expression for any ordinary failure.
            pass
        self._hash_attr(rhs)

    def action_unit(self, unit, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        # Ignore name
        self._hash_attr(unit.power)
        self._hash_attr(unit.offset)

    def action_dimension(self, dim, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        for sym in nineml_cls.dimension_symbols:
            self._hash_attr(getattr(dim, sym))

    def _hash_value(self, val):
        """Hash *val* after rounding its mantissa (tolerant float hashing)."""
        mantissa, exp = math.frexp(val)
        rounded_val = math.ldexp(round(mantissa, self.nearly_equal_places),
                                 exp)
        self._hash_attr(rounded_val)
class MismatchFinder(DualWithContextMixin, EqualityChecker):
    """Equality checker that collects *all* mismatches instead of failing
    fast.

    Every visiting/checking hook is wrapped so that the dual-visit
    exceptions raised by EqualityChecker are caught and appended to
    ``self.mismatch``; the ``_raise_*`` overrides attach the current
    visiting contexts so each mismatch can be located in both trees.
    """
    def __init__(self, **kwargs):
        # Both bases are initialised explicitly rather than via a single
        # cooperative super() chain.
        EqualityChecker.__init__(self, **kwargs)
        DualWithContextMixin.__init__(self)
    def find(self, obj1, obj2, **kwargs):  # @UnusedVariable
        """Return a newline-separated description of every mismatch found
        between *obj1* and *obj2* (empty string when equivalent)."""
        self.mismatch = []
        self.visit(obj1, obj2)
        # All context frames must have been popped by the end of the walk.
        assert not self.contexts1
        assert not self.contexts2
        return '\n'.join(str(e) for e in self.mismatch)
    def visit(self, *args, **kwargs):
        # Record the mismatch and keep walking instead of propagating.
        try:
            super(MismatchFinder, self).visit(*args, **kwargs)
        except NineMLDualVisitException as e:
            self.mismatch.append(e)
    def visit_child(self, child_name, child_type, parent1, parent2,
                    parent_cls, parent_result, **kwargs):
        try:
            super(MismatchFinder, self).visit_child(
                child_name, child_type, parent1, parent2, parent_cls,
                parent_result, **kwargs)
        except NineMLDualVisitException as e:
            self.mismatch.append(e)
            # The contexts pushed for this child were not popped when the
            # exception unwound, so pop them here.
            self._pop_contexts()
    def visit_children(self, children_type, parent1, parent2,
                       parent_cls, parent_result, **kwargs):
        try:
            super(MismatchFinder, self).visit_children(
                children_type, parent1, parent2, parent_cls, parent_result,
                **kwargs)
        except NineMLDualVisitException as e:
            self.mismatch.append(e)
            self._pop_contexts()
    def _check_attr(self, obj1, obj2, attr_name, nineml_cls, **kwargs):
        try:
            super(MismatchFinder, self)._check_attr(
                obj1, obj2, attr_name, nineml_cls, **kwargs)
        except NineMLDualVisitException as e:
            self.mismatch.append(e)
    def _check_rhs(self, obj1, obj2, attr_name, **kwargs):
        # NOTE(review): the parameter named ``attr_name`` here is forwarded
        # into the base class's ``nineml_cls`` slot -- the naming looks off
        # but the pass-through behaviour is unchanged; confirm intent.
        try:
            super(MismatchFinder, self)._check_rhs(
                obj1, obj2, attr_name, **kwargs)
        except NineMLDualVisitException as e:
            self.mismatch.append(e)
    def action_singlevalue(self, val1, val2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        try:
            super(MismatchFinder, self).action_singlevalue(
                val1, val2, nineml_cls, **kwargs)
        except NineMLDualVisitException as e:
            self.mismatch.append(e)
    def action_arrayvalue(self, val1, val2, nineml_cls, **kwargs):  # @UnusedVariable @IgnorePep8
        try:
            super(MismatchFinder, self).action_arrayvalue(
                val1, val2, nineml_cls, **kwargs)
        except NineMLDualVisitException as e:
            self.mismatch.append(e)
    def _raise_annotations_exception(self, nineml_cls, obj1, obj2, key):
        # Context-aware variants of the base-class exception raisers.
        raise NineMLDualVisitAnnotationsMismatchException(
            nineml_cls, obj1, obj2, key, self.contexts1, self.contexts2)
    def _raise_value_exception(self, attr_name, obj1, obj2, nineml_cls):
        raise NineMLDualVisitValueException(
            attr_name, obj1, obj2, nineml_cls, self.contexts1, self.contexts2)
    def _raise_type_exception(self, obj1, obj2):
        raise NineMLDualVisitTypeException(
            obj1, obj2, self.contexts1, self.contexts2)
    def _raise_none_child_exception(self, child_name, child1, child2):
        raise NineMLDualVisitNoneChildException(
            child_name, child1, child2, self.contexts1, self.contexts2)
    def _raise_keys_mismatch_exception(self, children_type, obj1, obj2):
        raise NineMLDualVisitKeysMismatchException(
            children_type, obj1, obj2, self.contexts1, self.contexts2)
    def _pop_contexts(self):
        # Discard the dangling context frames left by an aborted visit.
        self.contexts1.pop()
        self.contexts2.pop()
| 2.21875 | 2 |
tripleo_common/image/base.py | openstack/tripleo-common | 52 | 12763581 | <gh_stars>10-100
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import os
import yaml
from oslo_log import log
from tripleo_common.image.exception import ImageSpecificationException
class BaseImageManager(object):
    """Load and merge image definitions from a list of YAML config files.

    Later files can extend entries defined in earlier files: list/dict
    attributes in APPEND_ATTRIBUTES are merged, any other new key is
    added only if not already present.
    """

    logger = log.getLogger(__name__ + '.BaseImageManager')
    APPEND_ATTRIBUTES = ['elements', 'options', 'packages']
    CONFIG_SECTIONS = (
        DISK_IMAGES, UPLOADS, CONTAINER_IMAGES,
        CONTAINER_IMAGES_TEMPLATE
    ) = (
        'disk_images', 'uploads', 'container_images',
        'container_images_template'
    )

    def __init__(self, config_files, images=None):
        """:param config_files: paths of the YAML files to load, in order.
        :param images: optional whitelist of image names to keep."""
        self.config_files = config_files
        self.images = images

    def _extend_or_set_attribute(self, existing_image, image, attribute_name):
        """Merge *attribute_name* from *image* into *existing_image*:
        dicts are updated, lists extended, missing keys simply set."""
        value = image.get(attribute_name)
        if not value:
            return
        try:
            existing_image[attribute_name].update(value)
        except AttributeError:
            # Existing value is not a dict -- treat it as a list.
            existing_image[attribute_name].extend(value)
        except KeyError:
            # First occurrence of this attribute on the entry.
            existing_image[attribute_name] = value

    def load_config_files(self, section):
        """Return the merged list of entries under *section*, or None when
        a file lacks that section.

        :raises IOError: when one of the config files does not exist.
        :raises ImageSpecificationException: when an entry has no
            ``imagename`` key.
        """
        config_data = collections.OrderedDict()
        for config_file in self.config_files:
            # Fail loudly on a missing file rather than silently skipping.
            if not os.path.isfile(config_file):
                self.logger.error('No config file exists at: %s', config_file)
                raise IOError('No config file exists at: %s' % config_file)
            with open(config_file) as cf:
                data = yaml.safe_load(cf.read()).get(section)
            if not data:
                return None
            self.logger.debug('%s JSON: %s', section, str(data))
            for item in data:
                image_name = item.get('imagename')
                if image_name is None:
                    msg = 'imagename is required'
                    self.logger.error(msg)
                    raise ImageSpecificationException(msg)
                if self.images is not None and \
                        image_name not in self.images:
                    self.logger.debug('Image %s ignored', image_name)
                    continue
                existing_image = config_data.get(image_name)
                if not existing_image:
                    # First definition of this image name.
                    config_data[image_name] = item
                    continue
                # Merge the append-style attributes into the earlier entry.
                for attr in self.APPEND_ATTRIBUTES:
                    self._extend_or_set_attribute(existing_image, item, attr)
                # Any newly introduced key is added; existing keys win.
                for key, value in item.items():
                    existing_image.setdefault(key, value)
                config_data[image_name] = existing_image
        return list(config_data.values())

    def json_output(self):
        """Print the merged disk_images section as JSON to stdout."""
        self.logger.info('Using config files: %s', self.config_files)
        disk_images = self.load_config_files(self.DISK_IMAGES)
        print(json.dumps(disk_images))
| 1.71875 | 2 |
web/schema/__init__.py | ShenTengTu/py_sms_impl | 0 | 12763582 | from functools import lru_cache
from pydantic import BaseSettings, BaseModel
class StringConstraint(BaseModel):
    """Bundle of string-field validation constraints (length bounds and a
    regular expression)."""

    # A default of None means "no constraint" for that rule; pydantic
    # presumably treats these as optional fields -- verify for the
    # pydantic version in use.
    min_length: int = None
    max_length: int = None
    regex: str = None
class _Constraints(BaseSettings):
    """Validation constraints for user-facing string fields.

    Defined as a BaseSettings subclass, so the defaults below can
    presumably be overridden via the environment -- confirm with the
    pydantic settings documentation.
    """

    # user_id: 4-32 characters, must start with a letter; alphanumeric runs
    # may be separated by single '_' or '-' characters.
    user_id: StringConstraint = StringConstraint(
        min_length=4, max_length=32, regex=r"^[a-zA-Z]([_-]?[a-zA-Z0-9]+)+$"
    )
    # password: at least 8 characters with at least one digit and one
    # letter; only alphanumerics and @#$%^&+=*._- are permitted.
    password: StringConstraint = StringConstraint(
        min_length=8, regex=r"^(?=.*[0-9])(?=.*[a-zA-Z])([a-zA-Z0-9@#$%^&+=*._-]){8,}$"
    )
@lru_cache()
def constraints():
    """Return the lazily-created, process-wide ``_Constraints`` instance."""
    return _Constraints()
| 2.625 | 3 |
examples/visualizations/compare_logscale.py | cdagnino/ngboost | 3 | 12763583 | import numpy as np
import scipy as sp
import scipy.stats
from ngboost.distns import Normal, Laplace, LogNormal, LogLaplace
from ngboost.ngboost import NGBoost
from ngboost.scores import MLE, CRPS, MLE_SURV, CRPS_SURV
from ngboost.learners import default_tree_learner, default_linear_learner
from ngboost.evaluation import *
from sklearn.metrics import r2_score
from matplotlib import pyplot as plt
from argparse import ArgumentParser
if __name__ == '__main__':
    # Compare NGBoost calibration when fitting a log-family distribution on
    # the original scale versus the plain distribution on the log scale.
    argparser = ArgumentParser()
    argparser.add_argument("--dist", type=str, default="Laplace")
    argparser.add_argument("--noise-dist", type=str, default="Normal")
    args = argparser.parse_args()
    m, n = 1000, 50
    # Additive noise on the log scale (multiplicative on the original scale).
    if args.noise_dist == "Normal":
        noise = np.random.randn(*(m, 1))
    elif args.noise_dist == "Laplace":
        noise = sp.stats.laplace.rvs(size=(m, 1))
    beta = np.random.randn(n, 1)
    X = np.random.randn(m, n) / np.sqrt(n)
    Y = np.exp(X @ beta + 0.5 * noise)
    print(X.shape, Y.shape)
    # --- Fit on the original (positive) scale with Log<dist>.
    dist = eval("Log" + args.dist)
    ngb = NGBoost(n_estimators=50, learning_rate=0.5,
                  Dist=dist,
                  Base=default_linear_learner,
                  natural_gradient=False,
                  minibatch_frac=1.0,
                  Score=CRPS())
    losses = ngb.fit(X, Y)
    preds = ngb.pred_dist(X)
    # preds.loc parameterises the log-scale location, so exponentiate to
    # compare against Y on the original scale.
    print(f"R2: {r2_score(Y, np.exp(preds.loc)):.4f}")
    pctles, observed, slope, intercept = calibration_regression(preds, Y)
    plt.figure(figsize = (8, 3))
    plt.subplot(1, 2, 1)
    plot_pit_histogram(pctles, observed)
    plt.title("Original scale")
    # --- Refit on the log scale with the plain distribution.
    Y = np.log(Y)
    dist = eval(args.dist)
    ngb = NGBoost(n_estimators=50, learning_rate=0.5,
                  Dist=dist,
                  Base=default_linear_learner,
                  natural_gradient=False,
                  minibatch_frac=1.0,
                  Score=CRPS())
    losses = ngb.fit(X, Y)
    preds = ngb.pred_dist(X)
    # Bug fix: Y is now on the log scale and preds.loc already estimates
    # log(Y), so compare directly (previously exp(preds.loc) was compared
    # against log-scale Y -- a copy-paste from the branch above).
    print(f"R2: {r2_score(Y, preds.loc):.4f}")
    pctles, observed, slope, intercept = calibration_regression(preds, Y)
    plt.subplot(1, 2, 2)
    plot_pit_histogram(pctles, observed)
    plt.title("Log-scale")
    plt.tight_layout()
    plt.savefig("./figures/pit_logscale.pdf")
    plt.show()
| 2.515625 | 3 |
day_11/part_2.py | berkanteber/advent-of-code-2021 | 0 | 12763584 | <gh_stars>0
import os
input_path = os.path.join(os.path.dirname(__file__), "input.txt")
with open(input_path) as f:
data = f.read()
def solve(data: str) -> int:
    """Return the first step on which all 100 octopuses flash together."""
    grid = {
        (row, col): int(level)
        for row, line in enumerate(data.splitlines())
        for col, level in enumerate(line)
    }

    step = 1
    while True:
        # Every octopus gains one energy at the start of the step.
        for cell in grid:
            grid[cell] += 1

        # Cascade the flashes: a cell at energy 10 flashes, resets to 0,
        # and bumps its neighbours, possibly triggering further flashes.
        flashed = set()
        pending = {cell for cell, level in grid.items() if level == 10}
        while pending:
            cell = pending.pop()
            grid[cell] = 0
            flashed.add(cell)

            row, col = cell
            for d_row in (-1, 0, 1):
                for d_col in (-1, 0, 1):
                    if d_row == 0 and d_col == 0:
                        continue
                    n_row, n_col = row + d_row, col + d_col
                    if not (0 <= n_row < 10 and 0 <= n_col < 10):
                        continue
                    neighbor = (n_row, n_col)
                    # Cells that already flashed (or will) absorb no energy.
                    if neighbor in flashed or neighbor in pending:
                        continue
                    grid[neighbor] += 1
                    if grid[neighbor] == 10:
                        pending.add(neighbor)

        if len(flashed) == 100:
            return step
        step += 1
# Worked example from the puzzle statement (10x10 octopus grid).
example_data = """\
5483143223
2745854711
5264556173
6141336146
6357385478
4167524645
2176841721
6882881134
4846848554
5283751526
"""

# Sanity-check against the example's published answer before running the
# real input.
assert solve(example_data) == 195

print(solve(data))  # 242
| 2.890625 | 3 |
TWLight/message_storage.py | aacaldwell/TWLight | 67 | 12763585 | <reponame>aacaldwell/TWLight
from django.contrib.messages.storage.session import SessionStorage
from django.contrib.messages.storage.base import Message
from .view_mixins import DedupMessageMixin
class SessionDedupStorage(DedupMessageMixin, SessionStorage):
    """
    Custom session storage to prevent storing duplicate messages.

    DedupMessageMixin is listed first so its methods take precedence in
    the MRO over SessionStorage's.
    Cribbed directly from: https://stackoverflow.com/a/25157660
    """
    pass
| 1.898438 | 2 |
ample/parsers/tests/test_tm_parser.py | fsimkovic/ample | 6 | 12763586 | <gh_stars>1-10
"""Test functions for parsers.tm_parser"""
import os
import unittest
from ample import constants
from ample.parsers import tm_parser
class TestTMscore(unittest.TestCase):
    """Tests for parsing TMscore log files."""

    @classmethod
    def setUpClass(cls):
        cls.thisd = os.path.abspath(os.path.dirname(__file__))
        cls.ample_share = constants.SHARE_DIR
        cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')

    def test_parse(self):
        parser = tm_parser.TMscoreLogParser()
        parser.parse(os.path.join(self.testfiles_dir, "tmscore.log"))
        # Expected values taken from the bundled tmscore.log fixture.
        for attribute, expected in (
            ("nr_residues_common", 173),
            ("rmsd", 6.654),
            ("tm", 0.5512),
            ("maxsub", 0.3147),
            ("gdtts", 0.4292),
            ("gdtha", 0.2283),
        ):
            self.assertEqual(expected, getattr(parser, attribute))
class TestTMalign(unittest.TestCase):
    """Tests for parsing TMalign log files."""

    @classmethod
    def setUpClass(cls):
        cls.thisd = os.path.abspath(os.path.dirname(__file__))
        cls.ample_share = constants.SHARE_DIR
        cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')

    def test_parse(self):
        parser = tm_parser.TMalignLogParser()
        parser.parse(os.path.join(self.testfiles_dir, "tmalign.log"))
        # Expected values taken from the bundled tmalign.log fixture.
        for attribute, expected in (
            ("nr_residues_common", 143),
            ("tm", 0.70502),
            ("rmsd", 2.68),
            ("seq_id", 0.182),
        ):
            self.assertEqual(expected, getattr(parser, attribute))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 2.453125 | 2 |
E03 - Learning programs and models/Architectures/models/hrnet.py | mialona/Stomatal-segmentation | 0 | 12763587 | <filename>E03 - Learning programs and models/Architectures/models/hrnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from .segbase import SegBaseModel
__all__ = ['HRNet']
class HRNet(SegBaseModel):
    """Semantic-segmentation model built on an HRNet backbone.

    The backbone produces a feature map with ``last_inp_channels``
    channels; a small head (1x1 conv + BN + ReLU + final conv) maps it to
    per-class logits, which are bilinearly upsampled back to the input
    resolution in :meth:`forward`.
    """
    def __init__(self, nclass, backbone_name='hrnet_w30', norm_layer=nn.BatchNorm2d, BN_MOMENTUM = 0.01, FINAL_CONV_KERNEL = 1):
        # Attributes are assigned before super().__init__, which is then
        # passed the same values to build the backbone (need_backbone=True).
        self.backbone_name = backbone_name
        self.nclass = nclass
        self.norm_layer = norm_layer
        super(HRNet, self).__init__(backbone_name=self.backbone_name, nclass=self.nclass, need_backbone=True)
        # Segmentation head; the final conv's padding keeps spatial size
        # for a 3x3 kernel and is 0 otherwise.
        self.head = nn.Sequential(
            nn.Conv2d(
                in_channels=self.backbone.last_inp_channels,
                out_channels=self.backbone.last_inp_channels,
                kernel_size=1,
                stride=1,
                padding=0),
            norm_layer(self.backbone.last_inp_channels, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                in_channels=self.backbone.last_inp_channels,
                out_channels=nclass,
                kernel_size=FINAL_CONV_KERNEL,
                stride=1,
                padding=1 if FINAL_CONV_KERNEL == 3 else 0)
        )
    def forward(self, x):
        # Remember the input spatial size so the logits can be upsampled
        # back to it after the backbone's downsampling.
        height, width = x.shape[2], x.shape[3]
        x = self.backbone(x)
        x = self.head(x)
        x = F.interpolate(x, size=(height, width), mode='bilinear', align_corners=True)
        return x
src/project/ecommerce/models.py | Kaushal1011/django-ecommerce-graphql | 7 | 12763588 | from django.db import models
from django.conf import settings
class Product(models.Model):
    """A product available in the shop."""

    product_id = models.AutoField(primary_key=True)
    product_name = models.CharField(max_length=100)
    product_category = models.TextField()
    product_price = models.FloatField()
    # Discounted price; NULL/blank when the product is not on sale.
    product_discount_price = models.FloatField(blank=True, null=True)
    # Short teaser for listings vs. the full description.
    product_preview_desc = models.CharField(max_length=200, null=True)
    product_full_desc = models.CharField(max_length=400, null=True)
class OrderProduct(models.Model):
    """A product in a user's order, with its quantity.

    NOTE(review): ``user`` is a OneToOneField, which limits each user to a
    single OrderProduct row -- confirm this is intended (a ForeignKey
    would allow several items per user).
    """

    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    order_item = models.ForeignKey(Product, on_delete=models.CASCADE)
    order_quantity = models.IntegerField(default=1)
class Categories(models.Model):
    """A product category."""

    category_name = models.CharField(max_length=50)
    category_description = models.TextField(null=True)
class Orders(models.Model):
    """A placed order, identified by an auto-incrementing order number."""

    order_no = models.AutoField(primary_key=True)
    # Stored as free text rather than a foreign key to the user model.
    username = models.TextField()
    order_product = models.ForeignKey(OrderProduct, on_delete=models.CASCADE)
| 2.078125 | 2 |
main.py | putt/futures-collect | 1 | 12763589 | <reponame>putt/futures-collect<filename>main.py<gh_stars>1-10
#-*- coding=utf-8 -*-
# from FinalLogger import logger
# from Constant import inst_strategy, suffix_list
import urllib.request
import sqlite3
import pandas as pd
from pvplot import PriceVolumePlotter
import numpy as np
from datetime import datetime
import time
# sqlite cannot handle int64 directly, so pandas int64 values end up stored
# as blob columns; register adapters to convert numpy ints to Python int.
# via: https://stackoverflow.com/questions/49456158/integer-in-python-pandas-becomes-blob-binary-in-sqlite
sqlite3.register_adapter(np.int64, lambda val: int(val))
sqlite3.register_adapter(np.int32, lambda val: int(val))
# check_same_thread=False: the connection object is shared across callers.
conn = sqlite3.connect('futures.db3', check_same_thread = False)
# One contract symbol per line in contracts.txt.
contracts = []
with open('contracts.txt') as f:
    contracts = f.read().splitlines()
f.closed  # NOTE(review): no-op attribute access; the with-block already closed the file.
for i in contracts :
    # One 5-minute bar table per contract: date key, OHLC, v and p columns
    # (v is volume; p is presumably position/open interest -- confirm).
    cmd = "CREATE TABLE IF NOT EXISTS " + i + "_5MBar(d TEXT PRIMARY KEY , o DOUBLE , h DOUBLE, l DOUBLE, c DOUBLE, v INTEGER, p INTEGER)"
    conn.execute(cmd)
# Sina endpoints: minute-bar JSONP and real-time tick quotes.
bar_url = 'https://stock2.finance.sina.com.cn/futures/api/jsonp.php/var%20_{}_{}=/InnerFuturesNewService.getFewMinLine?symbol={}&type={}'
tick_url = 'https://hq.sinajs.cn/?_={}/&list=nf_{}'
def dealZhengZhou(symbol):
    """Normalise a contract code for the Sina minute-bar API.

    CZCE (Zhengzhou) contracts are quoted with upper-case letters
    (e.g. ``TA001``); the endpoint expects them lower-cased.  Codes that
    already start with a lower-case letter pass through unchanged.
    """
    return symbol.lower() if symbol[0].isupper() else symbol
def volumeIncrease(pv_data, price, volume):
    """Accumulate *volume* at *price* in the price->volume mapping, in place."""
    pv_data[price] = pv_data.get(price, 0) + volume
def collectTick(symbol):
    """Fetch the latest tick for *symbol* and return ``(time_str, price)``.

    The Sina tick endpoint returns a comma-separated record; field 1 is an
    HHMMSS time (reformatted here to HH:MM:SS) and field 8 is parsed as
    the price (presumably the latest trade price -- confirm).
    NOTE(review): reads the module-level ``now`` set in the __main__ loop.
    """
    # The contract code must be upper-case for this endpoint.
    url = tick_url.format(now.timestamp(), symbol.upper())
    print(url)
    tick = urllib.request.urlopen(url).read().split(b',')
    # NOTE(review): this local ``time`` shadows the imported time module
    # inside the function.
    time = tick[1].decode('utf8')
    return (':'.join((time[:2], time[2:4], time[4:])), float(tick[8]))
def collectBar(symbol):
    """Fetch 5-minute bars for *symbol*, persist new ones to sqlite and
    return the combined local + remote DataFrame (indexed by date).

    NOTE(review): reads the module-level ``now``, ``bar_url`` and ``conn``
    globals set at import time / in the __main__ loop.
    """
    inst = dealZhengZhou(symbol)
    url = bar_url.format(inst, now.timestamp(), inst, 5)
    bar_table = symbol + '_5MBar'
    # Load the locally cached bars for this contract.
    local_bars = pd.read_sql("SELECT * from {} ORDER BY d".format(bar_table), conn, index_col='d')
    # Fetch fresh 5-minute bars (JSONP payload; strip the wrapper).
    print(url)
    results = urllib.request.urlopen(url).read().decode('utf8')
    remote_bars = pd.read_json(results[results.find('(')+1: results.find(')')]).set_index('d')
    # No local data (newly added contract): seed the table with history.
    if local_bars.empty:
        new_bars = remote_bars
        # Backfill with 15-minute bars older than the first 5-minute bar.
        url = bar_url.format(inst, now.timestamp(), inst, 15)
        print(url)
        results = urllib.request.urlopen(url).read().decode('utf8')
        bars = pd.read_json(results[results.find('(')+1: results.find(')')]).set_index('d')
        local_bars = bars.loc[bars.index<remote_bars.index[0]]
        local_bars.to_sql(bar_table, conn, if_exists='append')
    else:
        new_bars = remote_bars.loc[remote_bars.index>local_bars.index[-1]]
    # Persist new bars; the most recent bar is still forming, so only
    # write up to the second-to-last one.
    update_bars = new_bars.iloc[:-1]
    if not update_bars.empty:
        update_bars.to_sql(bar_table, conn, if_exists='append')
    # Combine local history with the newly fetched bars.
    return pd.concat([local_bars, new_bars], sort=True)
if __name__=="__main__":
    pvplot = PriceVolumePlotter()
    # for symbol in contracts:
    while 1:
        # ``now`` is also read by collectTick/collectBar as a module-level
        # global.
        now = datetime.now()
        print ('{}'.format(now))
        # Poll every contract, plotting bars and the latest tick.
        for symbol in contracts:
            # print (symbol, now)
            pvplot.plot(symbol, collectBar(symbol), collectTick(symbol))
            time.sleep(3)
        hour = now.hour
        # Sleep outside trading hours (03-08 and 15-20 inclusive).
        while (hour>2 and hour<9) or (hour > 14 and hour <21):
            print("Waiting for trading time.")
            time.sleep(30)
            hour = datetime.now().hour
| 2.28125 | 2 |
scaffold/masterpage.py | allankellynet/mimas | 0 | 12763590 | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) <NAME> 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# System imports
# Google imports
import logging
# Local imports
import basehandler
from speaker_lib import speaker_checks
from submission_lib import submissions_aux, submissionrecord, submissionnotifynames
class MasterControlPage(basehandler.BaseHandler):
    """Admin page exposing one-off maintenance actions for the current
    conference, dispatched from POST button names."""
    def check_blank_bios(self):
        """Write an HTML report listing speakers whose bio is blank."""
        msg = "<h1>Blank bio report</h1>"
        blank_list = speaker_checks.find_blank_bio_submissions(self.get_crrt_conference_key())
        msg += "<p>Conference: " + self.get_crrt_conference_key().get().name
        msg += "<p>Speakers with a blank bio: " + str(len(blank_list))
        msg += "\n<p><table><th>Name<th>email<th>Created"
        for blank in blank_list:
            msg += "\n<tr><td>" + blank.name + "<td>" + blank.email + "<td>" + blank.created_date.isoformat()
        msg += "\n</table>"
        self.response.write(msg)
    def change_submission_communication(self):
        """Mark accept-acknowledged submissions as receipt-acknowledged."""
        conf_key = self.get_crrt_conference_key()
        submissions = submissionrecord.retrieve_conference_submissions(conf_key)
        for sub in submissions:
            logging.info("Change subs comms:" + sub.communication + ";" + submissionnotifynames.SUBMISSION_ACCEPT_ACKNOWLEDGED + ".")
            if sub.communication == submissionnotifynames.SUBMISSION_ACCEPT_ACKNOWLEDGED:
                sub.acknowledge_receipt()
        self.response.write("Done: submission comms updated")
    def fix_expenses(self, criteria, new_value):
        """Rewrite every submission expense matching *criteria* to
        *new_value* for the current conference."""
        conf_key = self.get_crrt_conference_key()
        submissions_aux.change_all_expenses(conf_key, criteria, new_value)
        self.response.write("Done: " + criteria + " -> " + new_value)
    def get(self):
        # Render the master page with no template substitutions.
        template_values = {
        }
        self.write_page("scaffold/masterpage.html", template_values)
    def post(self):
        """Dispatch on the submit button that was pressed; independent
        ``if``s allow several actions per request."""
        if self.request.get("BlankBio"):
            self.check_blank_bios()
        if self.request.get("ShortHaul"):
            # NOTE(review): Option9/Option15 etc. are expense option codes
            # defined elsewhere -- presumably short/long-haul travel tiers.
            self.fix_expenses("Option9", "Option15")
        if self.request.get("LongHaul"):
            self.fix_expenses("Option8", "Option16")
        if self.request.get("AckReceipt"):
            self.change_submission_communication()
| 2.34375 | 2 |
game/client/view/pad/legend.py | AntonYermilov/progue | 0 | 12763591 | from game.client.view.pad.pad import Pad
class LegendPad(Pad):
    """Legend pad that renders the available hot-key "buttons".

    Each button is a text label drawn in TEXT_COLOR with its hot-key
    character re-drawn in HIGHLIGHTED_TEXT_COLOR on the pad's last row.
    Which buttons appear depends on the inventory state and on whether
    the hero is alive.  The previous copy of this class repeated the same
    draw sequence in every ``_refresh_*`` method; it is factored into
    :meth:`_draw_button` here (rendering output is unchanged).
    """
    TEXT_COLOR = '#eaeaea'
    HIGHLIGHTED_TEXT_COLOR = '#e6e600'
    BACKGROUND_COLOR = '#26004d'
    # BACKGROUND_COLOR = '#000000'
    INVENTORY_TEXT = ' Inventory '
    SKIP_TEXT = ' Skip turn '
    NEXT_TEXT = ' Next (↓) '
    PREV_TEXT = ' Prev (↑) '
    USE_TEXT = ' Use '
    DROP_TEXT = ' Drop '
    QUIT_TEXT = ' Quit '
    # Left-to-right order of the left-aligned buttons; QUIT_TEXT is
    # right-aligned and intentionally not part of this list.
    ORDER = [INVENTORY_TEXT, SKIP_TEXT, NEXT_TEXT, PREV_TEXT, USE_TEXT, DROP_TEXT]

    @staticmethod
    def _get_shift(text):
        """Return the x-offset of *text* within the row of left-aligned
        buttons (each preceding button is padded by 4 columns)."""
        shift = 0
        for btn_text in LegendPad.ORDER:
            if btn_text == text:
                break
            shift += len(btn_text) + 4
        return shift

    def _draw_button(self, x, text, hl_char):
        """Draw one button at column *x* on the last row, highlighting the
        hot-key character *hl_char* (must occur in *text*)."""
        y = self.y1 - 1
        hl_index = text.index(hl_char)
        self.view._put_colored_text(x, y, text, self.TEXT_COLOR, self.BACKGROUND_COLOR)
        self.view._put_colored_symbol(x + hl_index, y, hl_char,
                                      self.HIGHLIGHTED_TEXT_COLOR, self.BACKGROUND_COLOR)

    def _refresh_background(self):
        """Fill the whole pad with the map's 'void' background colour."""
        void_color = self.view.entities_desc['map']['void']['background_color']
        for x in range(self.x0, self.x1):
            for y in range(self.y0, self.y1):
                self.view._put_colored_symbol(x=x, y=y, c=' ', color=void_color, bkcolor=void_color)

    def _refresh_inventory(self):
        self._draw_button(self.x0 + self._get_shift(self.INVENTORY_TEXT),
                          self.INVENTORY_TEXT, 'I')

    def _refresh_skip(self):
        self._draw_button(self.x0 + self._get_shift(self.SKIP_TEXT),
                          self.SKIP_TEXT, 'S')

    def _refresh_next(self):
        # Only shown while the inventory is open.
        if not self.view.model.inventory.is_opened():
            return
        self._draw_button(self.x0 + self._get_shift(self.NEXT_TEXT),
                          self.NEXT_TEXT, '↓')

    def _refresh_prev(self):
        if not self.view.model.inventory.is_opened():
            return
        self._draw_button(self.x0 + self._get_shift(self.PREV_TEXT),
                          self.PREV_TEXT, '↑')

    def _refresh_use(self):
        # Only shown while the inventory is open and an item is selected.
        if not self.view.model.inventory.is_opened():
            return
        if self.view.model.inventory.get_selected_item() is None:
            return
        self._draw_button(self.x0 + self._get_shift(self.USE_TEXT),
                          self.USE_TEXT, 'U')

    def _refresh_drop(self):
        if not self.view.model.inventory.is_opened():
            return
        if self.view.model.inventory.get_selected_item() is None:
            return
        self._draw_button(self.x0 + self._get_shift(self.DROP_TEXT),
                          self.DROP_TEXT, 'D')

    def _refresh_quit(self):
        # Right-aligned; hidden while the inventory is open.
        if self.view.model.inventory.is_opened():
            return
        self._draw_button(self.x1 - len(self.QUIT_TEXT), self.QUIT_TEXT, 'Q')

    def refresh(self):
        """Redraw the legend: background always, buttons only while the
        hero is alive."""
        self._refresh_background()
        if self.view.model.hero.stats.health > 0:
            self._refresh_inventory()
            self._refresh_skip()
            self._refresh_next()
            self._refresh_prev()
            self._refresh_use()
            self._refresh_drop()
            self._refresh_quit()
| 2.5 | 2 |
impy/tests/test_all_lab.py | kotania/impy | 6 | 12763592 | from __future__ import print_function
import sys
import os
import numpy as np
from multiprocessing import Pool, freeze_support
import tempfile
from impy.definitions import *
from impy.constants import *
from impy.kinematics import EventKinematics
from impy import impy_config, pdata
from impy.util import info
# AF: This is what the user interaction has to yield.
# It is the typical expected configuration that one
# wants to run (read pp-mode at energies not exceeding
# 7 TeV). If you want cosmic ray energies, this should
# be rather p-N at 10 EeV and lab frame (not yet defined).
event_kinematics = EventKinematics(ecm=7000 * GeV,
                                   p1pdg=2212,
                                   p2pdg=2212
                                   # nuc2_prop=(14,7)
                                   )
impy_config["user_frame"] = 'center-of-mass'
# Tags of the event generators exercised by this test; each is run in its
# own worker process below.
gen_list = [
    'SIBYLL23D',
    'SIBYLL23C',
    'SIBYLL23',
    'SIBYLL21',
    'DPMJETIII306',
    'DPMJETIII191',
    'EPOSLHC',
    'PHOJET112',
    'PHOJET191',
    'URQMD34',
    # 'PYTHIA8',
    'QGSJET01C',
    'QGSJETII03',
    'QGSJETII04'
]
# Histogram binning in xlab (20 uniform bins on [0, 1]).
xlab_bins = np.linspace(0,1,21)
xlab_widths = xlab_bins[1:] - xlab_bins[:-1]
xlab_centers = 0.5*(xlab_bins[1:] + xlab_bins[:-1])
nevents = 5000
# Per-event, per-bin-width normalization factor for the histograms.
norm = 1./float(nevents)/xlab_widths
def run_generator(gen, *args):
    """Run `nevents` events with one generator and histogram the spectra.

    Parameters
    ----------
    gen : str
        Generator tag (a key of `interaction_model_by_tag`).

    Returns
    -------
    tuple
        (success, gen, logfile_path, proton_hist, pion_hist) where the
        histograms are xlab spectra weighted by xlab**1.7.
    """
    print('Testing', gen)
    hist_p = np.zeros(len(xlab_centers))
    hist_pi = np.zeros(len(xlab_centers))
    # Create the log file *before* the try block so `log` is always bound
    # when the except branch builds the failure tuple (the original code
    # could raise NameError there if mkstemp itself failed).
    log = tempfile.mkstemp()[1]
    try:
        generator = make_generator_instance(interaction_model_by_tag[gen])
        generator.init_generator(event_kinematics, logfname=log)
        for event in generator.event_generator(event_kinematics, nevents):
            event.filter_final_state_charged()
            # Weight each entry by xlab**1.7 to flatten the steep spectrum.
            hist_p += np.histogram(event.xlab[event.p_ids == 2212],
                                   bins=xlab_bins,
                                   weights=event.xlab[event.p_ids == 2212]**1.7)[0]
            hist_pi += np.histogram(event.xlab[np.abs(event.p_ids) == 211],
                                    bins=xlab_bins,
                                    weights=event.xlab[np.abs(event.p_ids) == 211]**1.7)[0]
        return True, gen, log, hist_p, hist_pi
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any generator failure is reported via the tuple.
        return False, gen, log, hist_p, hist_pi
if __name__ in ['__main__', '__test__']:
    freeze_support()
    # One worker per generator; each run_generator call is independent.
    pool = Pool(processes=32)
    result = [pool.apply_async(run_generator, (gen,)) for gen in gen_list]
    result = [res.get(timeout=100000) for res in result]
    logs = {}
    xlab_protons = {}
    xlab_piplus = {}
    failed = []
    passed = []
    # Collect histograms for successful runs; keep the log text of failures.
    for r, gen, log, hist_p, hist_pi in result:
        if r:
            passed.append(gen)
            xlab_protons[gen] = hist_p
            xlab_piplus[gen] = hist_pi
        else:
            failed.append(gen)
            with open(log) as f:
                logs[gen] = f.read()
    info(0, 'Test results for 158 GeV pC collisions in lab frame:\n')
    info(0, 'Passed:', '\n', '\n '.join(passed))
    info(0, '\nFailed:', '\n', '\n '.join(failed))
    import pickle
    # Persist results next to this script for later plotting/inspection.
    pickle.dump((xlab_bins, xlab_protons, xlab_piplus, logs),
                open(os.path.splitext(__file__)[0] + '.pkl','wb'), protocol=-1)
| 2.078125 | 2 |
LC_problems/15.py | Howardhuang98/Blog | 0 | 12763593 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 15.py
@Contact : <EMAIL>
@Modify Time : 2022/4/14 20:57
------------
"""
import itertools
from typing import List
class Solution:
    """LeetCode 15 (3Sum)."""

    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets [a, b, c] from nums with a + b + c == 0.

        Sorts the input (in place), fixes the smallest element, then closes
        in with two pointers.  Duplicates are skipped in place, which makes
        the auxiliary `memo` set of the original implementation unnecessary.
        O(n^2) time, O(1) extra space beyond the output.
        """
        nums.sort()
        n = len(nums)
        triplets = []
        for i in range(n):
            # Skip repeated anchor values to avoid duplicate triplets.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            target = -nums[i]
            left, right = i + 1, n - 1
            while left < right:
                pair_sum = nums[left] + nums[right]
                if pair_sum < target:
                    left += 1
                elif pair_sum > target:
                    right -= 1
                else:
                    triplets.append([nums[i], nums[left], nums[right]])
                    # Advance both pointers past duplicates of the values
                    # just used, so each triplet is emitted exactly once.
                    left_val, right_val = nums[left], nums[right]
                    while left < right and nums[left] == left_val:
                        left += 1
                    while left < right and nums[right] == right_val:
                        right -= 1
        return triplets
if __name__ == '__main__':
    # Quick manual check; expected output: [[-2, 0, 2]].
    s = Solution()
    print(s.threeSum(nums=[-2,0,0,2,2]))
| 3.953125 | 4 |
pretrained_mobilenetssd_image.py | PacktPublishing/Computer-Vision-Python-OCR-Object-Detection-Quick-Starter | 6 | 12763594 | # -*- coding: utf-8 -*-
"""
@author: abhilash
"""
import numpy as np
import cv2
# load the image to detect, get width, height
# resize to match input size, convert to blob to pass into model
img_to_detect = cv2.imread('images/testing/scene3.jpg')
img_height = img_to_detect.shape[0]
img_width = img_to_detect.shape[1]
resized_img_to_detect = cv2.resize(img_to_detect,(300,300))
img_blob = cv2.dnn.blobFromImage(resized_img_to_detect,0.007843,(300,300),127.5)
#recommended scale factor is 0.007843, width,height of blob is 300,300, mean of 255 is 127.5,
# set of 21 class labels in alphabetical order (background + rest of 20 classes)
class_labels = ["background", "aeroplane", "bicycle", "bird", "boat","bottle", "bus", "car", "cat", "chair", "cow", "diningtable","dog", "horse", "motorbike", "person", "pottedplant", "sheep","sofa", "train", "tvmonitor"]
# Loading pretrained model from prototext and caffemodel files
# input preprocessed blob into model and pass through the model
# obtain the detection predictions by the model using forward() method
mobilenetssd = cv2.dnn.readNetFromCaffe('dataset/mobilenetssd.prototext','dataset/mobilenetssd.caffemodel')
mobilenetssd.setInput(img_blob)
obj_detections = mobilenetssd.forward()
# returned obj_detections[0, 0, index, 1] , 1 => will have the prediction class index
# 2 => will have confidence, 3 to 7 => will have the bounding box co-ordinates
no_of_detections = obj_detections.shape[2]
# loop over the detections
for index in np.arange(0, no_of_detections):
    prediction_confidence = obj_detections[0, 0, index, 2]
    # take only predictions with confidence more than 20%
    if prediction_confidence > 0.20:
        #get the predicted label
        predicted_class_index = int(obj_detections[0, 0, index, 1])
        predicted_class_label = class_labels[predicted_class_index]
        #obtain the bounding box co-oridnates for actual image from resized image size
        # (detection coords are normalized to [0, 1]; scale back to pixels)
        bounding_box = obj_detections[0, 0, index, 3:7] * np.array([img_width, img_height, img_width, img_height])
        (start_x_pt, start_y_pt, end_x_pt, end_y_pt) = bounding_box.astype("int")
        # print the prediction in console
        predicted_class_label = "{}: {:.2f}%".format(class_labels[predicted_class_index], prediction_confidence * 100)
        print("predicted object {}: {}".format(index+1, predicted_class_label))
        # draw rectangle and text in the image
        cv2.rectangle(img_to_detect, (start_x_pt, start_y_pt), (end_x_pt, end_y_pt), (0,255,0), 2)
        cv2.putText(img_to_detect, predicted_class_label, (start_x_pt, start_y_pt-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1)
# NOTE(review): there is no cv2.waitKey() after imshow — the window will
# likely close immediately when run as a script; confirm intended usage.
cv2.imshow("Detection Output", img_to_detect)
| 3.375 | 3 |
setup.py | messa/aiohttp-request-id-logging | 8 | 12763595 | #!/usr/bin/env python3
from setuptools import setup, find_packages
# Package metadata for aiohttp-request-id-logging.
setup(
    name='aiohttp-request-id-logging',
    version='0.0.1',
    description='Setup proper request id logging for your Aiohttp app',
    classifiers=[
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    # Ship everything except docs and tests.
    packages=find_packages(exclude=['doc', 'tests*']),
    install_requires=[
        'aiohttp',
    ])
| 1.4375 | 1 |
src/python/utility/strings.py | andyjost/Sprite | 1 | 12763596 | <filename>src/python/utility/strings.py<gh_stars>1-10
from six import PY2, PY3, text_type, binary_type
def ensure_str_safe(arg, encoding='utf-8', errors='strict'):
    '''Like ensure_str, but non-string-like objects pass through unchanged.'''
    if not isinstance(arg, (text_type, binary_type)):
        return arg
    return ensure_str(arg, encoding, errors)
def ensure_binary_safe(arg, encoding='utf-8', errors='strict'):
    '''Like ensure_binary, but non-string-like objects pass through unchanged.'''
    if not isinstance(arg, (text_type, binary_type)):
        return arg
    return ensure_binary(arg, encoding, errors)
def ensure_text_safe(arg, encoding='utf-8', errors='strict'):
    '''Like ensure_text, but non-string-like objects pass through unchanged.'''
    if not isinstance(arg, (text_type, binary_type)):
        return arg
    return ensure_text(arg, encoding, errors)
# The three ensure_* helpers were added to six in 1.12; define local
# fallbacks with the same contract when an older six is installed.
try:
    from six import ensure_str
except ImportError:
    def ensure_str(s, encoding='utf-8', errors='strict'):
        """Coerce *s* to `str`.
        For Python 2:
          - `unicode` -> encoded to `str`
          - `str` -> `str`
        For Python 3:
          - `str` -> `str`
          - `bytes` -> decoded to `str`
        """
        # Optimization: Fast return for the common case.
        if type(s) is str:
            return s
        if PY2 and isinstance(s, text_type):
            return s.encode(encoding, errors)
        elif PY3 and isinstance(s, binary_type):
            return s.decode(encoding, errors)
        elif not isinstance(s, (text_type, binary_type)):
            raise TypeError("not expecting type '%s'" % type(s))
        return s
try:
    from six import ensure_binary
except ImportError:
    def ensure_binary(s, encoding='utf-8', errors='strict'):
        """Coerce **s** to six.binary_type.
        For Python 2:
          - `unicode` -> encoded to `str`
          - `str` -> `str`
        For Python 3:
          - `str` -> encoded to `bytes`
          - `bytes` -> `bytes`
        """
        if isinstance(s, binary_type):
            return s
        if isinstance(s, text_type):
            return s.encode(encoding, errors)
        raise TypeError("not expecting type '%s'" % type(s))
try:
    from six import ensure_text
except ImportError:
    def ensure_text(s, encoding='utf-8', errors='strict'):
        """Coerce *s* to six.text_type.
        For Python 2:
          - `unicode` -> `unicode`
          - `str` -> `unicode`
        For Python 3:
          - `str` -> `str`
          - `bytes` -> decoded to `str`
        """
        if isinstance(s, binary_type):
            return s.decode(encoding, errors)
        elif isinstance(s, text_type):
            return s
        else:
            raise TypeError("not expecting type '%s'" % type(s))
migrations/versions/65f48a3dd741_change_image_paths.py | audreynjiraini/personal-blog | 0 | 12763597 | <filename>migrations/versions/65f48a3dd741_change_image_paths.py<gh_stars>0
"""change image paths
Revision ID: 65f48a3dd741
Revises: <KEY>
Create Date: 2019-09-24 09:15:00.262210
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '65f48a3dd741'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable users.profile_pic_path column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('profile_pic_path', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration by dropping users.profile_pic_path."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'profile_pic_path')
    # ### end Alembic commands ###
| 1.460938 | 1 |
script_sequential_mnist.py | CookieBox26/_ML | 0 | 12763598 | from utils.data_loader import MNIST
from models.gru import GRU
from models.tcn import TCN
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
# Train the model for one epoch.
def train(model, optimizer, train_loader, log_interval=10):
    """Run one training epoch, logging the mean loss every `log_interval` batches."""
    model.train()
    loss_in_log_interval = 0
    n_samples_processed = 0
    for i_batch, (x, y) in enumerate(train_loader):
        x, y = Variable(x), Variable(y)
        optimizer.zero_grad()
        # TCN takes the sequence directly; the recurrent model also needs a
        # freshly initialized hidden state per batch.
        if type(model) is TCN:
            y_hat = model(x)
        else:
            hidden = model.generate_initial_hidden(x.size()[0])
            y_hat, hidden = model(x, hidden)
        loss = F.nll_loss(y_hat, y)
        loss.backward()
        optimizer.step()
        loss_in_log_interval += loss.item()
        n_samples_processed += x.size()[0]
        if (i_batch + 1) % log_interval == 0:
            # Log message (Japanese): "{batches}/{total} batches ({samples}/{total}
            # samples) processed; mean loss over the last {k} batches: {loss}".
            print('{}/{} バッチ ({}/{} サンプル) 流れました 最近 {} バッチの平均損失 {}'.format(
                i_batch + 1, len(train_loader),
                n_samples_processed, len(train_loader.dataset),
                log_interval, loss_in_log_interval / log_interval
            ))
            loss_in_log_interval = 0
# Evaluate the model on the test data.
def test(model, test_loader):
    """Report mean NLL loss and accuracy of `model` over `test_loader`."""
    model.eval()
    n_total = len(test_loader.dataset)
    test_loss = 0
    n_correct = 0.0
    with torch.no_grad():
        for x, y in test_loader:
            if type(model) is TCN:
                y_hat = model(x)
            else:
                hidden = model.generate_initial_hidden(x.size()[0])
                y_hat, hidden = model(x, hidden)
            # Sum (not mean) per batch so dividing by n_total gives the
            # true per-sample average.
            test_loss += F.nll_loss(y_hat, y, reduction='sum').item()
            pred = y_hat.data.max(1, keepdim=True)[1]
            n_correct += pred.eq(y.data.view_as(pred)).sum()
    test_loss /= n_total
    # Messages (Japanese): mean loss / accuracy on the test data.
    print(f'テストデータでの平均損失 {test_loss}')
    print('テストデータでの正解率 {}/{} ({:.2%})'.format(int(n_correct), n_total, n_correct / n_total))
# Main entry point.
# - arch        : model architecture to train: 'gru' or 'tcn'
# - id          : identifier embedded in saved weight filenames
# - weight_dict : path to an existing weight file to load, if any
# - epochs      : number of epochs (0 skips training entirely)
# - permute     : optional index order used to permute each sequence
def main(arch='gru', id='hoge', weight_dict=None, epochs=10, permute=None):
    """Build the chosen model, optionally load weights, train and evaluate."""
    batch_size = 64
    train_loader, test_loader = MNIST(batch_size=batch_size,
                                      sequential=(arch == 'tcn'),
                                      sequential_rnn=(arch != 'tcn'),
                                      permute=permute)
    if arch == 'tcn':
        model = TCN(input_size=1, output_size=10, num_channels=[25]*8,
                    kernel_size=7, dropout=0.0)
        optimizer = optim.Adam(model.parameters(), lr=2e-3)
    elif arch == 'gru':
        model = GRU(input_size=1, output_size=10, num_layers=1, d_hidden=128,
                    initial_update_gate_bias=0.5, dropout=0.0)
        optimizer = optim.RMSprop(model.parameters(), lr=1e-3)
    if weight_dict is not None:
        model.load_state_dict(torch.load(weight_dict))
    for epoch in range(epochs):
        # Epoch banner (Japanese): "Epoch {n}".
        print(f'エポック {epoch}')
        train(model, optimizer, train_loader)
        test(model, test_loader)
        # Checkpoint after every epoch.
        torch.save(model.state_dict(), f'./weights/{arch}_sequential_mnist_{id}_{epoch}.dict')
    test(model, test_loader)
if __name__ == '__main__':
    # Fix seeds for reproducible data order and weight initialization.
    np.random.seed(0)
    torch.manual_seed(0)
    # Evaluate pre-trained sample weights without further training (epochs=0).
    main(arch='gru', weight_dict='./weights/gru_sequential_mnist_sample.dict', epochs=0)
    main(arch='tcn', weight_dict='./weights/tcn_sequential_mnist_sample.dict', epochs=0)
    # main(arch='gru', epochs=1)
    # main(arch='tcn', epochs=1)
    # For the Permuted MNIST variant:
    permute = np.random.permutation(784)
    # main(arch='gru', epochs=1, permute=permute)
    # main(arch='tcn', epochs=1, permute=permute)
| 2.859375 | 3 |
def dds_insert(d, s, p, o):
    """Insert `o` into the dict-of-dicts-of-sets `d` at d[s][p].

    Intermediate containers are created on first use via setdefault,
    replacing the original's explicit None checks.
    """
    d.setdefault(s, {}).setdefault(p, set()).add(o)
def dds_remove(d, s, p, o):
    """Remove `o` from d[s][p], pruning newly-empty containers on the way out.

    A missing s or p is a no-op; removing an absent `o` from an existing
    set raises KeyError (same as set.remove).
    """
    inner = d.get(s)
    if inner is None or inner.get(p) is None:
        return
    inner[p].remove(o)
    if not inner[p]:
        del inner[p]
    if not inner:
        del d[s]
class JsonLDIndex:
    """Triple index.

    `spo` maps subject -> predicate -> {objects}; `pos` maps
    predicate -> object -> {subjects}, and additionally stores the
    reverse direction under a '~'-prefixed predicate key.
    """
    def __init__(self, spo=None, pos=None):
        self.spo = spo if spo is not None else {}
        self.pos = pos if pos is not None else {}
    #
    def insert_triples(self, triples):
        """Index each (subject, predicate, object) triple; returns self for chaining."""
        for subject, predicate, obj in triples:
            dds_insert(self.spo, subject, predicate, obj)
            dds_insert(self.pos, predicate, obj, subject)
            dds_insert(self.pos, '~' + predicate, subject, obj)
        #
        return self
    #
    def remove_triples(self, triples):
        """Un-index each (subject, predicate, object) triple; returns self for chaining."""
        for subject, predicate, obj in triples:
            dds_remove(self.spo, subject, predicate, obj)
            dds_remove(self.pos, predicate, obj, subject)
            dds_remove(self.pos, '~' + predicate, subject, obj)
        #
        return self
| 3.078125 | 3 |
views.py | ernstki/flask-admin-column-labels | 2 | 12763600 | <reponame>ernstki/flask-admin-column-labels<filename>views.py
from flask_admin.contrib.sqla import ModelView
class ColumnPropertiesView(ModelView):
    """
    Shows table metadata gleaned from a view that's based on the MySQL
    information_schema.TABLES table, scoped to just tables that exist in
    the current database.
    """
    # Columns displayed in the admin list view; all of them are sortable.
    column_list = ['parent_table', 'name', 'proper_name', 'comment']
    column_sortable_list = column_list
class TestView(ModelView):
    """ModelView whose column labels and header tooltips are loaded at
    construction time from the ColumnProperty table for the wrapped
    model's database table."""
    column_display_pk = True
    list_template = 'admin/model/list_header_tooltips.html'
    def __init__(self, model, session, **kwargs):
        from models import ColumnProperty as cp
        clabels = {}
        cdescriptions = {}
        # Fetch the human-readable name and comment for every column of
        # this model's table.
        q = session.query(cp).filter(cp.parent_table == model.__tablename__)
        for row in q.all():
            clabels[row.name] = row.proper_name
            cdescriptions[row.name] = row.comment
        # (debug print() calls of the two dicts removed)
        self.column_labels = clabels
        self.column_descriptions = cdescriptions
        super(TestView, self).__init__(model, session, **kwargs)
| 2.328125 | 2 |
tests/test_books.py | pavlovprojects/python_qa_module_testing | 0 | 12763601 | import unittest
from src import Book
class TestBookClass(unittest.TestCase):
    """Unit tests for the src.Book model."""
    # Minimal valid payload: all four fields are required by Book.
    BOOK_DATA = {"title": "Просто Книга", "author": "<NAME>",
                 "genre": "Боевик", "year": "2020"}
    def setUp(self):
        # Fresh Book instance for every test.
        self.book = Book(self.BOOK_DATA)
    def test_book_attributes(self):
        # Each constructor field is exposed as an attribute.
        self.assertEqual(self.book.title, self.BOOK_DATA["title"])
        self.assertEqual(self.book.genre, self.BOOK_DATA["genre"])
        self.assertEqual(self.book.year, self.BOOK_DATA["year"])
        self.assertEqual(self.book.author, self.BOOK_DATA["author"])
    def test_book_validation(self):
        # Any missing required field raises ValueError.
        self.assertRaises(ValueError, Book, {"title": "Название"})
        self.assertRaises(ValueError, Book, {"title": "Название", "author": "Some author"})
        self.assertRaises(ValueError, Book, {"title": "Просто Книга", "author": "<NAME>", "genre": "Боевик"})
    def test_book_representation(self):
        # str() combines title and author.
        self.assertEqual(str(self.book), f"Книга: {self.BOOK_DATA['title']}, Автор: {self.BOOK_DATA['author']}")
| 3.640625 | 4 |
src/livecli/plugins/foxtr.py | NghiemTrung/livecli | 1 | 12763602 | <reponame>NghiemTrung/livecli
from __future__ import print_function
import re
from livecli.plugin import Plugin
from livecli.plugin.api import http
from livecli.stream import HLSStream
# Plugin metadata consumed by livecli's documentation tooling.
__livecli_docs__ = {
    "domains": [
        "fox.com.tr",
    ],
    "geo_blocked": [],
    "notes": "",
    "live": True,
    "vod": False,
    "last_update": "2017-02-12",
}
class FoxTR(Plugin):
    """
    Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin
    """
    url_re = re.compile(r"https?://www.fox.com.tr/canli-yayin")
    # Extracts the first desktop source URL from the page's player config.
    playervars_re = re.compile(r"desktop\s*:\s*\[\s*\{\s*src\s*:\s*'(.*?)'", re.DOTALL)
    @classmethod
    def can_handle_url(cls, url):
        """Return True when `url` is the fox.com.tr live-stream page."""
        return cls.url_re.match(url) is not None
    def _get_streams(self):
        """Fetch the page, pull the HLS source URL and expand its variants."""
        res = http.get(self.url)
        match = self.playervars_re.search(res.text)
        if match:
            stream_url = match.group(1)
            return HLSStream.parse_variant_playlist(self.session, stream_url)
# Entry point livecli uses to discover this plugin.
__plugin__ = FoxTR
| 2.328125 | 2 |
colors.py | mmarchetti/cv_sandbox | 0 | 12763603 | <reponame>mmarchetti/cv_sandbox
# BGR colors (OpenCV channel order: Blue, Green, Red — not RGB).
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# Gray levels named by their brightness percentage / fraction.
GRAY25 = (64, 64, 64)
GRAY50 = (128, 128, 128)
GRAY75 = (192, 192, 192)
GRAY33 = (85, 85, 85)
GRAY66 = (170, 170, 170)
# Primary and secondary colors.
BLUE = (255, 0, 0)
GREEN = (0, 255, 0)
RED = (0, 0, 255)
CYAN = (255, 255, 0)
MAGENTA = (255, 0, 255)
YELLOW = (0, 255, 255)
# Tertiary colors.
ORANGE = (0, 128, 255)
PURPLE = (255, 0, 128)
MINT = (128, 255, 0)
LIME = (0, 255, 128)
PINK = (128, 0, 255)
| 1.375 | 1 |
result/error_analyze.py | shuu-tatsu/distantSV | 1 | 12763604 | <reponame>shuu-tatsu/distantSV
import sys
class ErrorAnalyzer():
    """Counts NER errors on known vs. unknown named entities.

    Consumes CoNLL-style result lines of the form "word gold pred" and
    tallies occurrences and errors separately for entities seen in
    training ("known"), entities only in test ("unk"), and other tokens.
    """
    def __init__(self, known_ne, unk_ne, result_file_list):
        self.known_ne_set = known_ne
        self.unk_ne_set = unk_ne
        self.result_output_list = read_file(result_file_list)
        # Gold occurrence counters.
        self.known_occurrences = 0
        self.unk_occurrences = 0
        # Error counters split by word category.
        self.FN_known_error = 0
        self.FN_unk_error = 0
        self.FP_known_error = 0
        self.FP_unk_error = 0
        self.gold_others_occurrences = 0
        self.FN_others_error = 0
        self.FP_others_error = 0
    def count_gold(self, word):
        # Classify a gold entity token as known / unknown / other.
        if word in self.known_ne_set:
            self.known_occurrences += 1
        elif word in self.unk_ne_set:
            self.unk_occurrences += 1
        else:
            self.gold_others_occurrences += 1
    def count(self):
        """Scan all result lines, tally occurrences and errors, then derive ratios."""
        for output in self.result_output_list:
            try:
                word, gold, pred = output.split()
                if gold != 'O':
                    self.count_gold(word)
                if gold != pred:
                    self.count_error(word, gold, pred)
            except ValueError:
                # Blank / malformed lines (not exactly 3 fields) are skipped.
                pass
        self.get_error_probability()
    def count_error(self, word, gold, pred):
        # NOTE(review): the FN/FP naming looks inverted relative to the usual
        # convention — gold=='O' with a non-O prediction is routed to the
        # "FN" counters here. Confirm the intended definition.
        if gold == 'O':
            self.count_FN_error(word)
        else:
            self.count_FP_error(word)
    def count_FN_error(self, word):
        if word in self.known_ne_set:
            self.FN_known_error += 1
        elif word in self.unk_ne_set:
            self.FN_unk_error += 1
        else:
            self.FN_others_error += 1
    def count_FP_error(self, word):
        if word in self.known_ne_set:
            self.FP_known_error += 1
        elif word in self.unk_ne_set:
            self.FP_unk_error += 1
        else:
            self.FP_others_error += 1
    def get_error_probability(self):
        """Derive error ratios; `division` returns -1 when a denominator is zero."""
        self.total_error = self.FN_known_error + self.FN_unk_error +\
                           self.FP_known_error + self.FP_unk_error +\
                           self.gold_others_occurrences + self.FN_others_error + self.FP_others_error
        #FN error
        #Known error
        self.FN_known_error_prob = division(self.FN_known_error, self.known_occurrences)
        #Unk error
        self.FN_unk_error_prob = division(self.FN_unk_error, self.unk_occurrences)
        #NE error / total
        self.FN_ne_error_prob = division((self.FN_known_error + self.FN_unk_error), self.total_error)
        #Others error / total
        self.FN_others_error_prob = division(self.FN_others_error, self.total_error)
        #FP error
        #Known error
        self.FP_known_error_prob = division(self.FP_known_error, self.known_occurrences)
        #Unk error
        self.FP_unk_error_prob = division(self.FP_unk_error, self.unk_occurrences)
        #NE error / total
        self.FP_ne_error_prob = division((self.FP_known_error + self.FP_unk_error), self.total_error)
        #Others error / total
        self.FP_others_error_prob = division(self.FP_others_error, self.total_error)
class Vocabulary():
    """Set of named-entity tokens harvested from CoNLL-style files."""
    def __init__(self, file_list):
        self.word_label_list = read_file(file_list)
        self.vocab_set, self.vocab_size = self.make_ne_list()
    def make_ne_list(self):
        """Collect every token whose label is not 'O'; return (set, size)."""
        vocab = set()
        for line in self.word_label_list:
            parts = line.split()
            # Lines that are not exactly "token label" (blank or malformed)
            # are skipped, matching the original ValueError handling.
            if len(parts) == 2 and parts[1] != 'O':
                vocab.add(parts[0])
        return vocab, len(vocab)
def read_file(file_list):
    """Concatenate the lines of every file in `file_list` (newlines kept)."""
    lines = []
    for path in file_list:
        with open(path, 'r') as handle:
            lines.extend(handle)
    return lines
def division(denominator, numerator):
    # NOTE(review): the parameter names are swapped relative to their use —
    # the value named `denominator` is divided BY `numerator` below. All
    # call sites pass positionally, so behavior is (first arg) / (second arg).
    try:
        answer = denominator / numerator
    except ZeroDivisionError:
        # Sentinel for an undefined ratio (zero occurrences).
        answer = -1
    return answer
def main():
    """CLI entry: argv = train_file dev_file test_file result_file."""
    args = sys.argv
    TRAIN_FILE = args[1]
    DEV_FILE = args[2]
    TEST_FILE = args[3]
    RESULT_FILE = args[4]
    # Prepare Vocab
    # "Known" entities are those seen in train+dev; "unk" are test-only.
    known_ne = Vocabulary([TRAIN_FILE, DEV_FILE])
    print('Known NE set size: {}'.format(known_ne.vocab_size))
    test_occurrences_ne = Vocabulary([TEST_FILE])
    print('Test occurrences NE set size: {}'.format(test_occurrences_ne.vocab_size))
    known_ne_and_test_occurrences_ne = known_ne.vocab_set & test_occurrences_ne.vocab_set
    unk_ne_vocab_set = test_occurrences_ne.vocab_set - known_ne.vocab_set
    print('Known NE & Test occurrences size: {}'.format(len(known_ne_and_test_occurrences_ne)))
    print('Unk NE in Test occurrences size: {}'.format(len(unk_ne_vocab_set)))
    print('')
    # Analyze
    analyser = ErrorAnalyzer(known_ne_and_test_occurrences_ne, unk_ne_vocab_set, [RESULT_FILE])
    analyser.count()
    print('Known occurrences: {}'.format(analyser.known_occurrences))
    print('Unk occurrences: {}'.format(analyser.unk_occurrences))
    print('')
    print('Total Error: {}'.format(analyser.total_error))
    print('#FN')
    print('FN known error / known occur: {0:.3f} FN unk error / unk occur: {1:.3f} \nFN ne error / total:{2:.3f} FN others error / total: {3:.3f}'.format(
        analyser.FN_known_error_prob, analyser.FN_unk_error_prob, analyser.FN_ne_error_prob, analyser.FN_others_error_prob))
    print('#FP')
    print('FP known error / known occur: {0:.3f} FP unk error / unk occur: {1:.3f} \nFP ne error / total:{2:.3f} FP others error / total: {3:.3f}'.format(
        analyser.FP_known_error_prob, analyser.FP_unk_error_prob, analyser.FP_ne_error_prob, analyser.FP_others_error_prob))
if __name__ == '__main__':
    main()
| 2.84375 | 3 |
app/app.py | jhollowe/groceri.es | 0 | 12763605 | <filename>app/app.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_babel import Babel, format_date
from config import Config
from slugify import slugify
import pycountry
# Module-level application setup (no app factory).
app = Flask(__name__)
app.config.from_object(Config())
db = SQLAlchemy(app)
# render_as_batch enables Alembic's batch ALTER TABLE mode; compare_type
# makes autogenerate detect column-type changes.
migrate = Migrate(app, db, compare_type=True, render_as_batch=True)
login_manager = LoginManager(app)
# Endpoint users are redirected to when a login-required view is accessed.
login_manager.login_view = 'login'
babel = Babel(app)
from models import User # noqa
@login_manager.user_loader
def load_user(user_id):
    """Load user with specified user ID."""
    # Flask-Login supplies the id as a string; convert for the PK lookup.
    return User.query.get(int(user_id))
from models import Setting # noqa
@babel.localeselector
def get_locale():
    """Get the selected locale from user settings."""
    # The locale is stored as the 'default_language' row of the Setting table.
    setting = Setting.query.filter(Setting.name == 'default_language').first()
    if setting is not None:
        return setting.value
    # Return default language when none found
    return 'en'
@app.template_filter('slugify')
def slug(value):
    """Jinja2 filter to slugify text (e.g. "My Post" -> "my-post")."""
    return slugify(value)
@app.template_filter('language_name')
def language_name(value):
    """Jinja2 filter to get language object from language code."""
    # `value` is an ISO 639-1 two-letter code such as 'en'.
    return pycountry.languages.get(alpha_2=value)
@app.template_filter('date')
def format_datetime(value, format="short"):
    """Format a date time to (Default): d Mon YYYY HH:MM P"""
    # Render missing dates as an empty string to keep templates clean.
    if value is None:
        return ""
    return format_date(value, format)
"""Import all web routes and CLI routes to run this app"""
import views, cli # noqa | 2.65625 | 3 |
Lista3_11.py | AlessandroGoncalve/Lista3_Python | 12 | 12763606 | <reponame>AlessandroGoncalve/Lista3_Python
# Exercise: extend the previous program to also print, at the end, the sum
# of the numbers shown.
n1 = int(input("Digite um número: "))
n2 = int(input("Digite outro número: "))
# Print every integer strictly between the two inputs and accumulate the
# sum. Whichever input is larger, exactly one of the two ranges below is
# non-empty, so both orderings are handled.
total = 0
for i in range(n1 + 1, n2):
    print(i)
    total += i
for i in range(n2 + 1, n1):
    print(i)
    total += i
# Bug fix: the original printed `i + i` (twice the last loop value — and a
# NameError when both ranges were empty) instead of the accumulated sum.
print("Soma dos números: ", total)
| 4.125 | 4 |
jsengine/chakra_win.py | SeaHOH/jsengine | 4 | 12763607 | <gh_stars>1-10
'''This is a Python binding to Microsoft Chakra Javascript engine.
Forked from PyChakra (https://github.com/zhengrenzhe/PyChakra) to support
Windows' built-in Chakra.
'''
import ctypes as _ctypes
import threading as _threading
import json
# load Windows' built-in chakra binary
try:
    chakra = _ctypes.windll.Chakra
except Exception:
    # Narrowed from a bare `except:`; Exception still covers AttributeError
    # (no ctypes.windll on non-Windows) and OSError (DLL failed to load)
    # without swallowing KeyboardInterrupt/SystemExit.
    chakra_available = False
else:
    chakra_available = True
    # Tracks which ChakraHandle's context is currently active on the DLL.
    chakra._current_runtime = None
# Module switches: `threading` enables cross-thread locking around engine
# calls; `_lock` is created lazily by _enable_lock().
threading = False
_lock = None
def _enable_lock():
    """Lazily create the module-wide lock used in threaded mode."""
    global _lock
    if _lock is None:
        _lock = _threading.Lock()
def _disable_lock():
    """Drop the module-wide lock (used when leaving threaded mode).

    If the lock is currently held it is released first; releasing an
    unlocked Lock raises RuntimeError, which is expected and ignored.
    """
    global _lock
    if _lock is None:
        return
    try:
        _lock.release()
    except RuntimeError:
        # Lock was not held — nothing to release. (Was a bare `except: pass`.)
        pass
    _lock = None
class ChakraHandle(object):
    """One Chakra runtime + context pair, with optional cross-thread locking."""
    def _acquire(self):
        # Take the module lock in threaded mode (tearing it down otherwise),
        # then make this handle's context current on the shared DLL.
        if threading:
            _enable_lock()
            _lock.acquire()
        else:
            _disable_lock()
        self.set_current_runtime()
    def _release(self):
        # Counterpart of _acquire: release the module lock if threading.
        if threading:
            try:
                _lock.release()
            except:
                pass
        else:
            _disable_lock()
    def set_current_runtime(self):
        # Switch the DLL's current context only when another handle was
        # active, avoiding a redundant JsSetCurrentContext call.
        runtime = id(self)
        if chakra._current_runtime != runtime:
            chakra._current_runtime = runtime
            chakra.JsSetCurrentContext(self.__context)
    def __init__(self):
        # create chakra runtime and context
        runtime = _ctypes.c_void_p()
        chakra.JsCreateRuntime(0, 0, point(runtime))
        context = _ctypes.c_void_p()
        chakra.JsCreateContext(runtime, point(context))
        chakra.JsSetCurrentContext(context)
        self.__runtime = runtime
        self.__context = context
        # get JSON.stringify reference, and create its called arguments array
        # (args[0] stays `undefined` as the `this` argument; args[1] is
        # filled per call in __js_value_to_py_value).
        stringify = self.eval('JSON.stringify', raw=True)[1]
        undefined = _ctypes.c_void_p()
        chakra.JsGetUndefinedValue(point(undefined))
        args = (_ctypes.c_void_p * 2)()
        args[0] = undefined
        self.__jsonStringify = stringify
        self.__jsonStringifyArgs = args
    def __del__(self):
        # Dispose the runtime (and with it the context) when the handle dies.
        chakra.JsDisposeRuntime(self.__runtime)
    def eval(self, script, raw=False):
        '''Eval javascript string
        Examples:
            .eval('(()=>2)()') // (True, 2)
            .eval('(()=>a)()') // (False, "ReferenceError: 'a' is not defined")
        Parameters:
            script(str): javascript code string
            raw(bool?): whether return result as chakra JsValueRef directly
                (optional, default is False)
        Returns:
            (bool, result)
            bool: indicates whether javascript is running successfully.
            result: if bool is True, result is the javascript running
                return value.
                if bool is False and result is string, result is the
                javascript running exception
                if bool is False and result is number, result is the
                chakra internal error code
        '''
        self._acquire()
        js_source = _ctypes.c_wchar_p('')
        js_script = _ctypes.c_wchar_p(script)
        result = _ctypes.c_void_p()
        err = chakra.JsRunScript(js_script, 0, js_source, point(result))
        try:
            # eval success
            if err == 0:
                if raw:
                    return True, result
                else:
                    return self.__js_value_to_py_value(result)
            return self.__get_error(err)
        finally:
            # Always release the lock, even when returning an error tuple.
            self._release()
    def __js_value_to_py_value(self, js_value):
        # Round-trip through JSON.stringify + json.loads to convert a
        # JsValueRef into a plain Python value.
        args = self.__jsonStringifyArgs
        args[1] = js_value
        # value => json
        result = _ctypes.c_void_p()
        err = chakra.JsCallFunction(
            self.__jsonStringify, point(args), 2, point(result))
        if err == 0:
            result = self.__js_value_to_str(result)
            if result == 'undefined':
                result = None
            else:
                # json => value
                result = json.loads(result)
            return True, result
        return self.__get_error(err)
    def __get_error(self, err):
        # js exception or other error
        # 0x30000, JsErrorCategoryScript
        # 0x30001, JsErrorScriptException
        # 0x30002, JsErrorScriptCompile
        # XOR with the category base isolates the low bits, so `< 3`
        # matches exactly the three script-error codes listed above.
        if 0x30000 ^ err < 3:
            err = self.__get_exception()
        return False, err
    def __get_exception(self):
        # Fetch (and clear) the pending JS exception as a string.
        exception = _ctypes.c_void_p()
        chakra.JsGetAndClearException(point(exception))
        return self.__js_value_to_str(exception)
    def __js_value_to_str(self, js_value):
        # Convert any JsValueRef to its string form via the engine.
        js_value_ref = _ctypes.c_void_p()
        chakra.JsConvertValueToString(js_value, point(js_value_ref))
        str_p = _ctypes.c_wchar_p()
        str_l = _ctypes.c_size_t()
        chakra.JsStringToPointer(js_value_ref, point(str_p), point(str_l))
        return str_p.value
def point(obj):
    """Return a ctypes byref() reference to `obj` (for out-parameters).

    The parameter was renamed from `any`, which shadowed the builtin;
    all call sites pass it positionally.
    """
    return _ctypes.byref(obj)
| 2.171875 | 2 |
classification_scale.py | jrctrabuco/Assay-Multiparameter-Dash | 0 | 12763608 | #Scale for classification of Devices
| 1.085938 | 1 |
model_code/mechanisms/parametric_mse.py | burrelln/Measurement-Integrity-and-Peer-Assessment | 0 | 12763609 | <filename>model_code/mechanisms/parametric_mse.py<gh_stars>0
"""
Implementation of the parametric MSE mechanism and the EM procedure used to estimate the parameters of the parametric model (model PG_1 from Piech et al. 2013).
@author: <NAME> <<EMAIL>>
"""
import numpy as np
from math import sqrt
from sklearn.metrics import mean_squared_error as mse
from scipy.stats import gamma as gamma_distribution
def mse_p_mechanism(grader_dict, student_list, assignment_num, mu, gamma, bias=True):
    """
    Computes payments for students according to the MSE_P mechanism.
    Prints a warning if the EM estimation procedure does not converge.
    Returns the estimated parameters.
    Parameters
    ----------
    grader_dict : dict.
        Maps a Submission object to a list of graders (Student objects).
    student_list : list of Student objects.
        The population of students/graders.
    assignment_num : int.
        Unique identifier of the assignment for which payments are being computed.
    mu : float.
        The mean of the normal approximation of the distribution of true grades.
    gamma : float.
        The precision (i.e. the inverse of the variance) of the normal approximation of the distribution of true grades.
    bias : bool, optional.
        Indicates whether agents have bias, and therefore whether bias parameters should be estimated. The default is True.
    Returns
    -------
    scores : np.array of floats.
        The estimated grade computed for each submission.
    reliability : np.array of floats.
        The estimated reliability computed for each grader.
    biases : np.array of floats.
        The estimated bias computed for each grader. All zeroes when bias=False.
    """
    biases, reliability, scores, iteration = em_estimate_parameters(grader_dict, student_list, assignment_num, mu, gamma, bias)
    if not iteration < 1000:
        # EM hit the iteration cap without converging: zero out reliability
        # and give every student a zero payment increment.
        print("EM estimation procedure did not converge.")
        reliability = np.zeros(len(student_list))
        for student in student_list:
            student.payment += reliability[student.id]
    else:
        # Converged: charge each grader the MSE between their bias-corrected
        # reports and the estimated true grades of the tasks they graded.
        for student in student_list:
            tasks = []
            reports = []
            ground_truth = []
            b = biases[student.id]
            for task, report in student.grades[assignment_num].items():
                tasks.append(task)
                reports.append(report - b)
                ground_truth.append(scores[task])
            student.payment -= mse(ground_truth, reports)
    return scores, reliability, biases
def em_estimate_parameters(grader_dict, student_list, assignment_num, mu, gamma, include_bias=False):
    """
    Estimates parametric model parameters using EM-style algorithm with Bayesian updating.
    Parameters
    ----------
    grader_dict : dict.
        Maps a Submission object to a list of graders (Student objects).
    student_list : list of Student objects.
        The population of students/graders.
    assignment_num : int.
        Unique identifier of the assignment for which payments are being computed.
    mu : float.
        The mean of the normal approximation of the distribution of true grades.
    gamma : float.
        The precision (i.e. the inverse of the variance) of the normal approximation of the distribution of true grades.
    include_bias : bool, optional.
        Indicates whether agents have bias, and therefore whether bias parameters should be estimated. The default is False.
    Returns
    -------
    biases : np.array of floats.
        The estimated bias computed for each grader. All zeroes when bias=False.
    reliability : np.array of floats.
        The estimated reliability computed for each grader.
    scores : np.array of floats.
        The estimated grade computed for each submission.
    iteration : int.
        The total number of iterations of the EM process.
        Value indicates either that the score estimates conveged or that the score estimates did not converge and the estimation was stopped after 1000 iterations.
    """
    # Initial parameter guesses; scores start at 7.0 and reliabilities at
    # the prior mean 1/1.05 used by the Gamma update below.
    biases = np.zeros(len(student_list))
    reliability = np.full(len(student_list), 1/1.05)
    scores = np.full(len(student_list), 7.0)
    old_scores = np.ones(len(student_list))
    iteration = 0
    # Iterate until the score vector stops moving (L2 norm) or the cap hits.
    while np.linalg.norm((old_scores - scores)) > 0.0001 and iteration < 1000:
        old_scores = np.copy(scores)
        #One Iteration of EM
        for submission in grader_dict.keys():
            #First compute the scores
            # Precision-weighted average of the prior mean and the graders'
            # bias-corrected reports (weights are sqrt of the precisions).
            graders = list(submission.grades.keys())
            numerator_list = [sqrt(reliability[g])*(submission.grades[g] - biases[g]) for g in graders]
            denominator_list = [sqrt(reliability[g]) for g in graders]
            numerator_sum = sum(numerator_list)
            denominator_sum = sum(denominator_list)
            numerator = sqrt(gamma)*mu + numerator_sum
            denominator = sqrt(gamma) + denominator_sum
            scores[submission.student_id] = numerator/denominator
        if include_bias:
            for student in student_list:
                #Then compute the bias
                """
                BAYESIAN UPDATING: Conjugate prior is a Normal distirbution.
                """
                #prior_mu = 0
                prior_tau = 1
                # Residuals of this grader's reports against current scores.
                samples = [(s - scores[num]) for num, s in student.grades[assignment_num].items()]
                sample_sum = sum(samples)
                n = len(samples)
                tau = reliability[student.id]
                posterior_tau = prior_tau + n*tau
                posterior_mu = (tau*sample_sum)/posterior_tau
                biases[student.id] = posterior_mu
        for student in student_list:
            #Then compute the reliability
            """
            BAYESIAN UPDATING: Conjugate Prior is a Gamma distribution
            """
            # Gamma(a, B) prior with mean a/B = 1/1.05, matching the
            # reliability initialization above.
            prior_a = 10.0/1.05
            prior_B = 10.0
            residuals = [ (s - (scores[num] + biases[student.id]))**2 for num, s in student.grades[assignment_num].items()]
            n = len(residuals)
            residual_sum = sum(residuals)
            posterior_a = prior_a + n/2.0
            posterior_B = prior_B + residual_sum/2.0
            posterior_theta = 1.0 / posterior_B
            # New reliability = posterior mean of the Gamma distribution.
            score = gamma_distribution.mean(a=posterior_a, scale=posterior_theta)
            reliability[student.id] = score
        iteration += 1
    return biases, reliability, scores, iteration
estomagordo-python3/day_11b.py | erikagnvall/advent_of_code_2019 | 0 | 12763610 | <filename>estomagordo-python3/day_11b.py
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
from itertools import permutations
def paint(painted):
    """Render the painted panels as newline-joined rows of '#' (white) and '.' (black)."""
    loy, hiy, lox, hix = 1000, -1000, 1000, -1000
    # Bounding box of every panel the robot ever painted.
    for (y, x) in painted:
        loy, hiy = min(loy, y), max(hiy, y)
        lox, hix = min(lox, x), max(hix, x)
    rows = []
    for y in range(loy, hiy + 1):
        rows.append(''.join(
            '.' if painted.get((y, x), 0) == 0 else '#'
            for x in range(lox, hix + 1)))
    return '\n'.join(rows)
def solve(data, inp):
    """Run the day-11 Intcode painting robot and return the painted image.

    data : list of ints -- the Intcode program.
    inp  : int -- color of the robot's starting panel (1 = white for part 2).

    Returns the string rendered by paint() when the program halts (opcode 99),
    or 'outside-loop' on an unknown opcode.
    """
    # Program memory: defaultdict(int) so reads beyond the program return 0.
    d = defaultdict(int)
    for i, v in enumerate(data):
        d[i] = v
    # (y, x) -> last color painted on that panel.
    painted = {}
    y = 0
    x = 0
    # Up, right, down, left -- indexed by `facing`.
    directions = ((-1, 0), (0, 1), (1, 0), (0, -1))
    facing = 0
    p = 0           # instruction pointer
    relbase = 0     # relative base for parameter mode 2
    steps = 0
    output_count = 0
    first = True    # very first input instruction reads `inp`; afterwards the camera color
    while True:
        steps += 1
        # Decode parameter modes (0 = position, 1 = immediate, 2 = relative).
        amode = (d[p] % 1000) // 100
        bmode = (d[p] % 10000) // 1000
        cmode = (d[p] % 100000) // 10000
        a = d[p + 1] if amode == 1 else d[d[p + 1]] if amode == 0 else d[d[p + 1] + relbase]
        b = d[p + 2] if bmode == 1 else d[d[p + 2]] if bmode == 0 else d[d[p + 2] + relbase]
        c = d[p + 3] if cmode == 1 else d[d[p + 3]] if cmode == 0 else d[d[p + 3] + relbase]
        if d[p] % 100 == 99:
            # Halt: render everything the robot painted.
            return paint(painted)
        elif d[p] % 100 == 1:
            # Opcode 1: add.
            if cmode == 0:
                d[d[p + 3]] = a + b
            else:
                d[d[p + 3] + relbase] = a + b
            p += 4
        elif d[p] % 100 == 2:
            # Opcode 2: multiply.
            if cmode == 0:
                d[d[p + 3]] = a * b
            else:
                d[d[p + 3] + relbase] = a * b
            p += 4
        elif d[p] % 100 == 3:
            # Opcode 3: input. First read is the starting color; every later
            # read is the color of the panel the robot currently stands on.
            if amode == 0:
                d[d[p + 1]] = inp if first else 0 if not (y, x) in painted else painted[(y, x)]
            else:
                d[d[p + 1] + relbase] = inp if first else 0 if not (y, x) in painted else painted[(y, x)]
            first = False
            p += 2
        elif d[p] % 100 == 4:
            # Opcode 4: output. Outputs alternate: paint color, then turn.
            if output_count % 2 == 0:
                painted[(y, x)] = a
            else:
                if a == 0:
                    facing = (facing - 1) % 4   # 0 = turn left
                else:
                    facing = (facing + 1) % 4   # otherwise turn right
                y += directions[facing][0]
                x += directions[facing][1]
            output_count += 1
            p += 2
        elif d[p] % 100 == 5:
            # Opcode 5: jump-if-true.
            if a != 0:
                p = b
            else:
                p += 3
        elif d[p] % 100 == 6:
            # Opcode 6: jump-if-false.
            if a == 0:
                p = b
            else:
                p += 3
        elif d[p] % 100 == 7:
            # Opcode 7: less-than.
            cc = 1 if a < b else 0
            if cmode == 0:
                d[d[p + 3]] = cc
            else:
                d[d[p + 3] + relbase] = cc
            p += 4
        elif d[p] % 100 == 8:
            # Opcode 8: equals.
            cc = 1 if a == b else 0
            if cmode == 0:
                d[d[p + 3]] = cc
            else:
                d[d[p + 3] + relbase] = cc
            p += 4
        elif d[p] % 100 == 9:
            # Opcode 9: adjust relative base.
            relbase += a
            p += 2
        else:
            # Unknown opcode: report and bail out.
            print('uh oh', d[p])
            return 'outside-loop'
def read_and_solve():
    """Load the comma-separated Intcode program from input_11.txt and run it with input 1."""
    with open('input_11.txt') as f:
        program = [int(token) for token in f.readline().split(',')]
    return solve(program, 1)
if __name__ == '__main__':
    # Script entry point: run day 11 part 2 and print the painted registration.
    # (Removed trailing dataset-extraction residue that made this line a syntax error.)
    print(read_and_solve())
api/models/__init__.py | weng-lab/SCREEN | 5 | 12763611 |
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 <NAME>, <NAME>, <NAME>, <NAME>
| 0.914063 | 1 |
0x04-python-more_data_structures/3-common_elements.py | malu17/alx-higher_level_programming | 0 | 12763612 | <gh_stars>0
#!/usr/bin/python3
def common_elements(set_1, set_2):
    """Return a new set of the elements common to both set_1 and set_2."""
    return set(set_1).intersection(set_2)
| 2.46875 | 2 |
backend/core/tests.py | ES2-UFPI/404-portal | 1 | 12763613 | from django.test import TestCase
from django.urls import reverse
from ..core.models import Portal
USERNAME = "username_test"
PASSWORD = "<PASSWORD>"
class PortalTestCase(TestCase):
    """Base test case that seeds the test database with one fully-populated
    Portal record, shared (read-mostly) by all tests in subclasses."""

    @classmethod
    def setUpTestData(self):
        # NOTE(review): the parameter is conventionally named `cls` for a
        # classmethod; it is named `self` here (behavior is unaffected).
        # Create and persist the single Portal fixture used by CoreTestCase.
        portal = Portal.objects.create()
        portal.email = "<EMAIL>"
        portal.nome_solicitante = "Username Teste"
        portal.cpf_solicitante = "000.000.000-00"
        portal.email_solicitante = "<EMAIL>"
        portal.telefone_solicitante = "(86) 3200-0000"
        portal.celular_solicitante = "(86) 9500-0000"
        portal.universidade = "Universidade Federal do Piauí"
        portal.departamento = "Computação"
        portal.area_conhecimento = "Ciências da Natureza, Exatas"
        portal.nome_do_curso = "Ciência da Computação"
        portal.nome_do_coordenador = "Coordenador Teste"
        portal.nome_do_chefe_departamento = "Chefe Teste"
        portal.cidade = "Teresina"
        portal.estado = "Piauí"
        portal.cep = "64012-000"
        portal.bairro = "Ininga"
        portal.rua = "Petrônio Portela"
        portal.numero = "0000"
        portal.telefone_1 = "(86) 3201-0000"
        portal.telefone_2 = "(86) 3202-0000"
        portal.save()
class CoreTestCase(PortalTestCase):
    """Smoke tests for the core app's views, using the Portal fixture
    created in PortalTestCase.setUpTestData."""

    def test_home(self):
        # Home page renders (HTTP 200) for an authenticated client.
        self.client.login(username=USERNAME, password=PASSWORD)
        resp = self.client.get(reverse('core:home'))
        self.assertEqual(resp.status_code, 200)

    def test_listar(self):
        # Listing view renders (HTTP 200) for an authenticated client.
        self.client.login(username=USERNAME, password=PASSWORD)
        resp = self.client.get(reverse('core:listar'))
        self.assertEqual(resp.status_code, 200)

    def test_visualizar(self):
        # Detail view renders (HTTP 200) for the fixture Portal.
        portal = Portal.objects.get(nome_do_curso="Ciência da Computação")
        self.client.login(username=USERNAME, password=PASSWORD)
        resp = self.client.get(reverse('core:visualizar', kwargs={'id': portal.id}))
        self.assertEqual(resp.status_code, 200)

    def test_editar(self):
        # Verify the fixture's initial field values, then POST an edit and
        # confirm the change was persisted (the view redirects on success).
        portal = Portal.objects.get(nome_do_curso="Ciência da Computação")
        self.client.login(username=USERNAME, password=PASSWORD)
        self.assertEqual(portal.email, "<EMAIL>")
        self.assertEqual(portal.nome_solicitante, "Username Teste")
        self.assertEqual(portal.cpf_solicitante, "000.000.000-00")
        self.assertEqual(portal.email_solicitante, "<EMAIL>")
        self.assertEqual(portal.telefone_solicitante, "(86) 3200-0000")
        self.assertEqual(portal.celular_solicitante, "(86) 9500-0000")
        self.assertEqual(portal.universidade, "Universidade Federal do Piauí")
        self.assertEqual(portal.departamento, "Computação")
        self.assertEqual(portal.area_conhecimento, "Ciências da Natureza, Exatas")
        self.assertEqual(portal.nome_do_coordenador, "Coordenador Teste")
        self.assertEqual(portal.nome_do_curso, "Ciência da Computação")
        self.assertEqual(portal.nome_do_chefe_departamento, "Chefe Teste")
        self.assertEqual(portal.cidade, "Teresina")
        self.assertEqual(portal.estado, "Piauí")
        self.assertEqual(portal.cep, "64012-000")
        self.assertEqual(portal.bairro, "Ininga")
        self.assertEqual(portal.rua, "Petrônio Portela")
        self.assertEqual(portal.numero, "0000")
        self.assertEqual(portal.telefone_1, "(86) 3201-0000")
        self.assertEqual(portal.telefone_2, "(86) 3202-0000")
        url = reverse('core:editar', kwargs={'id': portal.id})
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        POST = {
            "area_conhecimento": "Exatas"
        }
        resp = self.client.post(url, POST)
        self.assertEqual(302, resp.status_code)
        portal = Portal.objects.get(nome_do_curso="Ciência da Computação")
        self.assertEqual(portal.area_conhecimento, "Exatas")

    def test_login(self):
        # Login page is reachable at /login/ and renders (HTTP 200).
        self.client.login(username=USERNAME, password=PASSWORD)
        url = reverse('core:login')
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(url, '/login/')

    def test_logout(self):
        # Logout at /logout/ redirects (HTTP 302) back to the root URL.
        self.client.login(username=USERNAME, password=PASSWORD)
        url = reverse('core:logout')
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(url, '/logout/')
        self.assertRedirects(resp, '/')
| 2.796875 | 3 |
lightnion/cache.py | pthevenet/lightnion | 120 | 12763614 | <reponame>pthevenet/lightnion
import os
import time
import json
import shutil
import base64
import logging
cache_directory = '.lightnion-cache.d'
def directory(base_dir=None):
    """Return the path of the cache directory, creating it if needed.

    :param base_dir: parent directory for the cache; defaults to the
        current working directory.
    :raises RuntimeError: if the cache directory does not exist and
        could not be created.
    """
    if base_dir is None:
        base_dir = os.getcwd()
    base_dir = os.path.join(base_dir, cache_directory)
    if not os.path.isdir(base_dir):
        logging.info(
            'Note: creating {} to cache descriptors.'.format(base_dir))
        # exist_ok avoids the TOCTOU race of the original os.mkdir: another
        # process creating the directory between the isdir check and the
        # mkdir call would have raised FileExistsError.
        os.makedirs(base_dir, exist_ok=True)
    if not os.path.isdir(base_dir):
        raise RuntimeError(
            'Unable to fetch cache directory: {}'.format(base_dir))
    return base_dir
def purge():
    """Remove the on-disk cache directory and everything inside it."""
    cache_dir = directory()
    logging.warning('Note: removing {} to purge cache.'.format(cache_dir))
    shutil.rmtree(cache_dir)
class descriptors:
    """File-system cache for (micro)descriptors, stored as JSON files keyed
    by digest under <cache>/(micro-)descriptors/<first 2 hex chars>/<hex digest>."""

    @staticmethod
    def filename(descriptor, get=False):
        # Cache path for a descriptor dict with 'flavor' and a
        # 'digest'/'micro-digest' field; creates intermediate directories.
        base_dir = 'descriptors'
        if 'micro' in descriptor['flavor']:
            base_dir = 'micro-' + base_dir
        base_dir = os.path.join(directory(), base_dir)
        if not os.path.isdir(base_dir):
            os.mkdir(base_dir)
        field = 'digest'
        if 'micro' in descriptor['flavor']:
            field = 'micro-digest'
        digest = descriptor[field]
        # Micro-digests are always base64 and must be decoded to hex; plain
        # digests are base64 when storing (get=False) but presumably already
        # hex on lookup (get=True) -- see get(), which re-derives the hex
        # form for comparison.
        if (not get) or 'micro' in descriptor['flavor']:
            digest = base64.b64decode(descriptor[field] + '====').hex()
        # Two-character fan-out subdirectory to keep directory sizes small.
        half_dir = os.path.join(base_dir, digest[:2])
        if not os.path.isdir(half_dir):
            os.mkdir(half_dir)
        return os.path.join(half_dir, digest)

    @staticmethod
    def put(descriptor):
        # Persist a descriptor as JSON; a pre-existing file is left untouched
        # (digest-addressed content is assumed immutable).
        filename = descriptors.filename(descriptor)
        if os.path.isfile(filename):
            return
        with open(filename, 'w') as f:
            json.dump(descriptor, f)

    @staticmethod
    def get(flavor, digest):
        # Load a cached descriptor and verify both flavor and digest match.
        field = 'digest'
        if 'micro' in flavor:
            field = 'micro-digest'
        descriptor = {'flavor': flavor, field: digest}
        filename = descriptors.filename(descriptor, get=True)
        with open(filename, 'r') as f:
            descriptor = json.load(f)
        if not descriptor['flavor'] == flavor:
            raise ValueError('Mismatched flavor.')
        new_digest = descriptor[field]
        # Plain digests are stored base64 in the file; convert to hex so the
        # comparison below matches the hex digest the caller passed in.
        if not 'micro' in field:
            new_digest = base64.b64decode(new_digest + '====').hex()
        if not new_digest == digest:
            raise ValueError('Mismatched digest.')
        return descriptor
class consensus:
    """File-system cache for the network consensus, one JSON file per flavor."""

    @staticmethod
    def filename(flavor):
        """Path of the cached consensus file for the given flavor."""
        return os.path.join(directory(), 'consensus-{}'.format(flavor))

    @staticmethod
    def put(fields):
        """Write a consensus (dict carrying a 'flavor' key) to the cache as JSON."""
        with open(consensus.filename(fields['flavor']), 'w') as f:
            json.dump(fields, f)

    @staticmethod
    def get(flavor):
        """Load a cached consensus, validating its flavor and freshness."""
        with open(consensus.filename(flavor), 'r') as f:
            fields = json.load(f)
        if fields['flavor'] != flavor:
            raise ValueError('Mismatched flavor.')
        if fields['headers']['valid-until']['stamp'] < time.time():
            raise ValueError('Consensus need to be refreshed: {} < {}'.format(
                fields['headers']['valid-until']['stamp'], time.time()))
        return fields
| 2.375 | 2 |
simpleAICV/classification/datasets/ilsvrc2012dataset.py | zgcr/pytorch-ImageNet-CIFAR-COCO-voc-training | 0 | 12763615 | <filename>simpleAICV/classification/datasets/ilsvrc2012dataset.py<gh_stars>0
import os
import cv2
import numpy as np
from torch.utils.data import Dataset
class ILSVRC2012Dataset(Dataset):
    '''
    ILSVRC2012 Dataset:https://image-net.org/

    Expects root_dir/<set_name>/<class_name>/<image files>; class labels are
    assigned by sorted class-directory name so the mapping is deterministic.
    '''
    def __init__(self, root_dir, set_name='train', transform=None):
        # root_dir   : dataset root containing 'train'/'val' sub-directories.
        # set_name   : which split to load ('train' or 'val').
        # transform  : optional callable applied to each sample dict.
        assert set_name in ['train', 'val'], 'Wrong set name!'
        # make sure all directories in set_dir directory are sub-categories directory and no other files
        set_dir = os.path.join(root_dir, set_name)

        # Sort class names so class -> label indices are reproducible.
        sub_class_name_list = []
        for per_sub_class_name in os.listdir(set_dir):
            sub_class_name_list.append(per_sub_class_name)
        sub_class_name_list = sorted(sub_class_name_list)

        self.image_path_list = []
        for per_sub_class_name in sub_class_name_list:
            per_sub_class_dir = os.path.join(set_dir, per_sub_class_name)
            for per_image_name in os.listdir(per_sub_class_dir):
                per_image_path = os.path.join(per_sub_class_dir,
                                              per_image_name)
                self.image_path_list.append(per_image_path)

        self.class_name_to_label = {
            sub_class_name: i
            for i, sub_class_name in enumerate(sub_class_name_list)
        }
        self.label_to_class_name = {
            i: sub_class_name
            for i, sub_class_name in enumerate(sub_class_name_list)
        }

        self.transform = transform

        print(f'Dataset Size:{len(self.image_path_list)}')
        print(f'Dataset Class Num:{len(self.class_name_to_label)}')

    def __len__(self):
        return len(self.image_path_list)

    def __getitem__(self, idx):
        image = self.load_image(idx)
        label = self.load_label(idx)

        sample = {
            'image': image,
            'label': label,
        }

        if self.transform:
            sample = self.transform(sample)

        return sample

    def load_image(self, idx):
        # imdecode + np.fromfile handles paths with non-ASCII characters
        # that a plain cv2.imread cannot open.
        image = cv2.imdecode(
            np.fromfile(self.image_path_list[idx], dtype=np.uint8),
            cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        return image.astype(np.float32)

    def load_label(self, idx):
        # The class name is the image's parent directory. Use os.path instead
        # of the original split('/') so this also works on Windows, where
        # os.path.join builds paths with '\\'.
        class_name = os.path.basename(
            os.path.dirname(self.image_path_list[idx]))
        label = self.class_name_to_label[class_name]
        label = np.array(label)

        return label.astype(np.float32)
if __name__ == '__main__':
import os
import random
import numpy as np
import torch
seed = 0
# for hash
os.environ['PYTHONHASHSEED'] = str(seed)
# for python and numpy
random.seed(seed)
np.random.seed(seed)
# for cpu gpu
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
import os
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(BASE_DIR)
from tools.path import ILSVRC2012_path
import torchvision.transforms as transforms
from tqdm import tqdm
from simpleAICV.classification.common import Opencv2PIL, PIL2Opencv, TorchRandomResizedCrop, TorchRandomHorizontalFlip, TorchResize, TorchCenterCrop, Normalize, AutoAugment, RandAugment, ClassificationCollater
ilsvrc2012traindataset = ILSVRC2012Dataset(
root_dir=ILSVRC2012_path,
set_name='train',
transform=transforms.Compose([
Opencv2PIL(),
TorchRandomResizedCrop(resize=224),
TorchRandomHorizontalFlip(prob=0.5),
PIL2Opencv(),
# Normalize(),
]))
count = 0
for per_sample in tqdm(ilsvrc2012traindataset):
print(per_sample['image'].shape, per_sample['label'].shape,
per_sample['label'], type(per_sample['image']),
type(per_sample['label']))
# temp_dir = './temp'
# if not os.path.exists(temp_dir):
# os.makedirs(temp_dir)
# color = [random.randint(0, 255) for _ in range(3)]
# image = np.ascontiguousarray(per_sample['image'], dtype=np.uint8)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# label = per_sample['label']
# text = f'label:{int(label)}'
# cv2.putText(image,
# text, (30, 30),
# cv2.FONT_HERSHEY_PLAIN,
# 1.5,
# color=color,
# thickness=1)
# cv2.imencode('.jpg', image)[1].tofile(
# os.path.join(temp_dir, f'idx_{count}.jpg'))
if count < 10:
count += 1
else:
break
from torch.utils.data import DataLoader
collater = ClassificationCollater()
train_loader = DataLoader(ilsvrc2012traindataset,
batch_size=128,
shuffle=True,
num_workers=4,
collate_fn=collater)
count = 0
for data in tqdm(train_loader):
images, labels = data['image'], data['label']
print(images.shape, labels.shape)
print(images.dtype, labels.dtype)
if count < 10:
count += 1
else:
break
ilsvrc2012valdataset = ILSVRC2012Dataset(root_dir=ILSVRC2012_path,
set_name='val',
transform=transforms.Compose([
Opencv2PIL(),
TorchResize(resize=256),
TorchCenterCrop(resize=224),
PIL2Opencv(),
Normalize(),
]))
count = 0
for per_sample in tqdm(ilsvrc2012valdataset):
print(per_sample['image'].shape, per_sample['label'].shape,
per_sample['label'], type(per_sample['image']),
type(per_sample['label']))
if count < 10:
count += 1
else:
break
from torch.utils.data import DataLoader
collater = ClassificationCollater()
val_loader = DataLoader(ilsvrc2012valdataset,
batch_size=128,
shuffle=False,
num_workers=4,
collate_fn=collater)
count = 0
for data in tqdm(val_loader):
images, labels = data['image'], data['label']
print(images.shape, labels.shape)
print(images.dtype, labels.dtype)
if count < 10:
count += 1
else:
break
# ilsvrc2012traindataset = ILSVRC2012Dataset(
# root_dir=ILSVRC2012_path,
# set_name='train',
# transform=transforms.Compose([
# Opencv2PIL(),
# TorchRandomResizedCrop(resize=224),
# TorchRandomHorizontalFlip(prob=0.5),
# # AutoAugment(),
# RandAugment(N=2, M=10),
# PIL2Opencv(),
# Normalize(),
# ]))
# count = 0
# for per_sample in tqdm(ilsvrc2012traindataset):
# print(per_sample['image'].shape, per_sample['label'].shape,
# per_sample['label'], type(per_sample['image']),
# type(per_sample['label']))
# if count < 10:
# count += 1
# else:
# break
# from torch.utils.data import DataLoader
# collater = ClassificationCollater()
# train_loader = DataLoader(ilsvrc2012traindataset,
# batch_size=128,
# shuffle=True,
# num_workers=4,
# collate_fn=collater)
# count = 0
# for data in tqdm(train_loader):
# images, labels = data['image'], data['label']
# print(images.shape, labels.shape)
# print(images.dtype, labels.dtype)
# if count < 10:
# count += 1
# else:
# break | 2.59375 | 3 |
models/Lightweight/MobileNetV1.py | Dou-Yu-xuan/deep-learning-visal | 150 | 12763616 | <filename>models/Lightweight/MobileNetV1.py
import torch
import torch.nn as nn
import torchvision
def BottleneckV1(in_channels, out_channels, stride):
    """Depthwise-separable convolution block: a 3x3 depthwise conv with the
    given stride, then a 1x1 pointwise conv, each followed by BatchNorm + ReLU6."""
    depthwise = [
        nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
                  kernel_size=3, stride=stride, padding=1,
                  groups=in_channels),
        nn.BatchNorm2d(in_channels),
        nn.ReLU6(inplace=True),
    ]
    pointwise = [
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                  kernel_size=1, stride=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*(depthwise + pointwise))
class MobileNetV1(nn.Module):
    """MobileNet v1 backbone + softmax classifier (Howard et al., 2017).

    Expects 3x224x224 input; forward() returns per-class probabilities
    (rows sum to 1) of shape (batch, num_classes).
    """
    def __init__(self, num_classes=1000):
        super(MobileNetV1, self).__init__()

        # Standard 3x3 stem convolution, stride 2 (224 -> 112).
        self.first_conv = nn.Sequential(
            nn.Conv2d(in_channels=3,out_channels=32,kernel_size=3,stride=2,padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU6(inplace=True),
        )

        # 13 depthwise-separable blocks; strides follow the paper
        # (112 -> 56 -> 28 -> 14 -> 7 spatial resolution).
        self.bottleneck = nn.Sequential(
            BottleneckV1(32, 64, stride=1),
            BottleneckV1(64, 128, stride=2),
            BottleneckV1(128, 128, stride=1),
            BottleneckV1(128, 256, stride=2),
            BottleneckV1(256, 256, stride=1),
            BottleneckV1(256, 512, stride=2),
            BottleneckV1(512, 512, stride=1),
            BottleneckV1(512, 512, stride=1),
            BottleneckV1(512, 512, stride=1),
            BottleneckV1(512, 512, stride=1),
            BottleneckV1(512, 512, stride=1),
            BottleneckV1(512, 1024, stride=2),
            BottleneckV1(1024, 1024, stride=1),
        )

        self.avg_pool = nn.AvgPool2d(kernel_size=7,stride=1)
        self.linear = nn.Linear(in_features=1024,out_features=num_classes)
        self.dropout = nn.Dropout(p=0.2)
        self.softmax = nn.Softmax(dim=1)

        self.init_params()

    def init_params(self):
        """Initialize weights: He init for convs, identity (1, 0) for
        BatchNorm, small normal init for the classifier."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                # Depthwise/pointwise convs here keep their default bias;
                # guard anyway in case a bias-free conv is added later.
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # BUG FIX: the original lumped Linear with BatchNorm and set
                # the classifier's weight matrix to the constant 1, which
                # makes every class logit identical. Use a small normal
                # init instead.
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.first_conv(x)
        x = self.bottleneck(x)
        x = self.avg_pool(x)
        x = x.view(x.size(0),-1)
        x = self.dropout(x)
        x = self.linear(x)
        out = self.softmax(x)  # probabilities; each row sums to 1
        return out
if __name__=='__main__':
    # Smoke test: build the model, print its layer summary, and run a single
    # random 224x224 RGB image through it (expected output shape: [1, 1000]).
    model = MobileNetV1()
    print(model)
    input = torch.randn(1, 3, 224, 224)
    out = model(input)
    print(out.shape)
| 2.578125 | 3 |
database_delivery_sdk/api/dbtask/create_custom_dbtask_pb2.py | easyopsapis/easyops-api-python | 5 | 12763617 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: create_custom_dbtask.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='create_custom_dbtask.proto',
package='dbtask',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1a\x63reate_custom_dbtask.proto\x12\x06\x64\x62task\"\x94\x02\n\x19\x43reateCustomDBTaskRequest\x12\x38\n\x06\x64\x62Task\x18\x01 \x01(\x0b\x32(.dbtask.CreateCustomDBTaskRequest.DbTask\x1a\xbc\x01\n\x06\x44\x62Task\x12\x13\n\x0b\x64\x62ServiceId\x18\x01 \x01(\t\x12\x45\n\tchangeCfg\x18\x02 \x03(\x0b\x32\x32.dbtask.CreateCustomDBTaskRequest.DbTask.ChangeCfg\x1aV\n\tChangeCfg\x12\x14\n\x0c\x64\x62InstanceId\x18\x01 \x01(\t\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x10\n\x08password\x18\x03 \x01(\t\x12\x0f\n\x07sqlText\x18\x04 \x01(\t\",\n\x1a\x43reateCustomDBTaskResponse\x12\x0e\n\x06taskId\x18\x01 \x01(\t\"\x87\x01\n!CreateCustomDBTaskResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x30\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\".dbtask.CreateCustomDBTaskResponseb\x06proto3')
)
_CREATECUSTOMDBTASKREQUEST_DBTASK_CHANGECFG = _descriptor.Descriptor(
name='ChangeCfg',
full_name='dbtask.CreateCustomDBTaskRequest.DbTask.ChangeCfg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dbInstanceId', full_name='dbtask.CreateCustomDBTaskRequest.DbTask.ChangeCfg.dbInstanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='username', full_name='dbtask.CreateCustomDBTaskRequest.DbTask.ChangeCfg.username', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='password', full_name='dbtask.CreateCustomDBTaskRequest.DbTask.ChangeCfg.password', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sqlText', full_name='dbtask.CreateCustomDBTaskRequest.DbTask.ChangeCfg.sqlText', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=229,
serialized_end=315,
)
_CREATECUSTOMDBTASKREQUEST_DBTASK = _descriptor.Descriptor(
name='DbTask',
full_name='dbtask.CreateCustomDBTaskRequest.DbTask',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dbServiceId', full_name='dbtask.CreateCustomDBTaskRequest.DbTask.dbServiceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='changeCfg', full_name='dbtask.CreateCustomDBTaskRequest.DbTask.changeCfg', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATECUSTOMDBTASKREQUEST_DBTASK_CHANGECFG, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=127,
serialized_end=315,
)
_CREATECUSTOMDBTASKREQUEST = _descriptor.Descriptor(
name='CreateCustomDBTaskRequest',
full_name='dbtask.CreateCustomDBTaskRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dbTask', full_name='dbtask.CreateCustomDBTaskRequest.dbTask', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATECUSTOMDBTASKREQUEST_DBTASK, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=315,
)
_CREATECUSTOMDBTASKRESPONSE = _descriptor.Descriptor(
name='CreateCustomDBTaskResponse',
full_name='dbtask.CreateCustomDBTaskResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='taskId', full_name='dbtask.CreateCustomDBTaskResponse.taskId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=317,
serialized_end=361,
)
_CREATECUSTOMDBTASKRESPONSEWRAPPER = _descriptor.Descriptor(
name='CreateCustomDBTaskResponseWrapper',
full_name='dbtask.CreateCustomDBTaskResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='dbtask.CreateCustomDBTaskResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='dbtask.CreateCustomDBTaskResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='dbtask.CreateCustomDBTaskResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='dbtask.CreateCustomDBTaskResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=364,
serialized_end=499,
)
_CREATECUSTOMDBTASKREQUEST_DBTASK_CHANGECFG.containing_type = _CREATECUSTOMDBTASKREQUEST_DBTASK
_CREATECUSTOMDBTASKREQUEST_DBTASK.fields_by_name['changeCfg'].message_type = _CREATECUSTOMDBTASKREQUEST_DBTASK_CHANGECFG
_CREATECUSTOMDBTASKREQUEST_DBTASK.containing_type = _CREATECUSTOMDBTASKREQUEST
_CREATECUSTOMDBTASKREQUEST.fields_by_name['dbTask'].message_type = _CREATECUSTOMDBTASKREQUEST_DBTASK
_CREATECUSTOMDBTASKRESPONSEWRAPPER.fields_by_name['data'].message_type = _CREATECUSTOMDBTASKRESPONSE
DESCRIPTOR.message_types_by_name['CreateCustomDBTaskRequest'] = _CREATECUSTOMDBTASKREQUEST
DESCRIPTOR.message_types_by_name['CreateCustomDBTaskResponse'] = _CREATECUSTOMDBTASKRESPONSE
DESCRIPTOR.message_types_by_name['CreateCustomDBTaskResponseWrapper'] = _CREATECUSTOMDBTASKRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateCustomDBTaskRequest = _reflection.GeneratedProtocolMessageType('CreateCustomDBTaskRequest', (_message.Message,), {
'DbTask' : _reflection.GeneratedProtocolMessageType('DbTask', (_message.Message,), {
'ChangeCfg' : _reflection.GeneratedProtocolMessageType('ChangeCfg', (_message.Message,), {
'DESCRIPTOR' : _CREATECUSTOMDBTASKREQUEST_DBTASK_CHANGECFG,
'__module__' : 'create_custom_dbtask_pb2'
# @@protoc_insertion_point(class_scope:dbtask.CreateCustomDBTaskRequest.DbTask.ChangeCfg)
})
,
'DESCRIPTOR' : _CREATECUSTOMDBTASKREQUEST_DBTASK,
'__module__' : 'create_custom_dbtask_pb2'
# @@protoc_insertion_point(class_scope:dbtask.CreateCustomDBTaskRequest.DbTask)
})
,
'DESCRIPTOR' : _CREATECUSTOMDBTASKREQUEST,
'__module__' : 'create_custom_dbtask_pb2'
# @@protoc_insertion_point(class_scope:dbtask.CreateCustomDBTaskRequest)
})
_sym_db.RegisterMessage(CreateCustomDBTaskRequest)
_sym_db.RegisterMessage(CreateCustomDBTaskRequest.DbTask)
_sym_db.RegisterMessage(CreateCustomDBTaskRequest.DbTask.ChangeCfg)
CreateCustomDBTaskResponse = _reflection.GeneratedProtocolMessageType('CreateCustomDBTaskResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATECUSTOMDBTASKRESPONSE,
'__module__' : 'create_custom_dbtask_pb2'
# @@protoc_insertion_point(class_scope:dbtask.CreateCustomDBTaskResponse)
})
_sym_db.RegisterMessage(CreateCustomDBTaskResponse)
CreateCustomDBTaskResponseWrapper = _reflection.GeneratedProtocolMessageType('CreateCustomDBTaskResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _CREATECUSTOMDBTASKRESPONSEWRAPPER,
'__module__' : 'create_custom_dbtask_pb2'
# @@protoc_insertion_point(class_scope:dbtask.CreateCustomDBTaskResponseWrapper)
})
_sym_db.RegisterMessage(CreateCustomDBTaskResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.148438 | 1 |
script/nightly_assess.py | gaoming714/arkham | 1 | 12763618 | <reponame>gaoming714/arkham<gh_stars>1-10
"""
download data from jqdatasdk
need finance.pickle
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
import jqdatasdk as jq
import pickle
from datetime import datetime, timedelta
import time
import pytz
import numpy as np
import pandas as pd
from util.util import haunter
LOG = haunter("nightly_assess")
BASELINE = "000001.XSHG" # SZZS - 000001
PE_HS300 = None
CACHE_PATH = os.path.join(BASE_DIR,"cache")
CACHE_NIGHTLY = os.path.join(BASE_DIR,"cache","nightly")
# stamp_file = os.path.join(cache_runtime,"stamp.pickle")
ticker_list = [
"601318.XSHG", # 中国平安
# "601398.XSHG", # 工商银行
# "601288.XSHG", # 农业银行
"600036.XSHG", # 招商银行
# "601988.XSHG", # 中国银行
# "601166.XSHG", # 兴业银行
# "600000.XSHG", # 浦发银行
# "000001.XSHE", # 平安银行
# "600016.XSHG", # 民生银行
# "601328.XSHG", # 交通银行
"002142.XSHE", # 宁波银行
# "601998.XSHG", # 中信银行
# "601818.XSHG", # 光大银行
# "000333.XSHE", # 美的集团
# "000651.XSHE", # 格力电器
# "002032.XSHE", # 苏泊尔
# "002508.XSHE", # 老板电器
# "002242.XSHE", # 九阳股份
# "603868.XSHG", # 飞科电器
"600030.XSHG", # 中信证券
"300059.XSHE", # 东方财富
"600837.XSHG", # 海通证券
"601688.XSHG", # 华泰证券
"601211.XSHG", # 国泰君安
"600999.XSHG", # 招商证券
"600109.XSHG", # 国金证券
"603288.XSHG", # 海天味业
"600298.XSHG", # 安琪酵母
"600887.XSHG", # 伊利股份
# "002714.XSHE", # 牧原股份
"600519.XSHG", # 贵州茅台
"000858.XSHE", # 五粮液
"600809.XSHG", # 山西汾酒
# "603160.XSHG", # 汇顶科技
# "603501.XSHG", # 韦尔股份
# "603986.XSHG", # 兆易创新
"002916.XSHE", # 深南电路
"002463.XSHE", # 沪电股份
# "603228.XSHG", # 景旺电子
# "002475.XSHE", # 立讯精密
"300750.XSHE", # 宁德时代
"300014.XSHE", # 亿纬锂能
"002415.XSHE", # 海康威视
# "002236.XSHE", # 大华股份
# "601138.XSHG", # 工业富联
"002352.XSHE", # 顺丰控股
"002120.XSHE", # 韵达股份
# "002174.XSHE", # 游族网络 x
# "603444.XSHG", # 吉比特
# "002555.XSHE", # 三七互娱
# "002624.XSHE", # 完美世界 x
# "002027.XSHE", # 分众传媒 x
# "300033.XSHE", # 同花顺
# "600801.XSHG", # 华新水泥
# "600585.XSHG", # 海螺水泥
"002271.XSHE", # 东方雨虹
"600009.XSHG", # 上海机场
# "601100.XSHG", # 恒立液压
"600031.XSHG", # 三一重工
# "002572.XSHE", # 索菲亚
"603816.XSHG", # 顾家家居
"603833.XSHG", # 欧派家居
"600763.XSHG", # 通策医疗
"600079.XSHG", # 人福医药
"601799.XSHG", # 星宇股份
"600048.XSHG", # 保利地产
"300413.XSHE", # 芒果超媒
"600660.XSHG", # 福耀玻璃
]
special_dict = {
"601318.XSHG" : [None, 0, None], # 中国平安
"601398.XSHG" : [None, 0, None], # 工商银行
"601288.XSHG" : [None, 0, None], # 农业银行
"600036.XSHG" : [None, 0, None], # 招商银行
"601988.XSHG" : [None, 0, None], # 中国银行
"601166.XSHG" : [None, 0, None], # 兴业银行
"600000.XSHG" : [None, 0, None], # 浦发银行
"000001.XSHE" : [None, 0, None], # 平安银行
"600016.XSHG" : [None, 0, None], # 民生银行
"601328.XSHG" : [None, 0, None], # 交通银行
"002142.XSHE" : [None, 0, None], # 宁波银行
"601998.XSHG" : [None, 0, None], # 中信银行
"601818.XSHG" : [None, 0, None], # 光大银行
"000333.XSHE" : [None, 0, None], # 美的集团
"000651.XSHE" : [None, 0, None], # 格力电器
# "002032.XSHE" : [None, 0, None], # 苏泊尔
"002508.XSHE" : [None, 0, None], # 老板电器
# "002242.XSHE" : [None, 0, None], # 九阳股份
"603868.XSHG" : [None, 0, None], # 飞科电器
"600519.XSHG" : [None, 20, None], # 贵州茅台
"600048.XSHG" : [None, 0, None], # 保利地产
"600030.XSHG" : [None, 27, None], # 中信证券
"300059.XSHE" : [None, 27, None], # 东方财富
"600837.XSHG" : [None, 27, None], # 海通证券
"601688.XSHG" : [None, 27, None], # 华泰证券
"601211.XSHG" : [None, 27, None], # 国泰君安
"600999.XSHG" : [None, 27, None], # 招商证券
"600109.XSHG" : [None, 27, None], # 国金证券
"600009.XSHG" : [None, None, 27.66], # 上海机场
}
def get_baseline_PE():
    """Compute the HS300 PE (TTM) baseline from the nightly caches.

    Loads finance.pickle and the HS300 weights from weights.pickle, joins
    them, derives the index-level PE as total circulating market cap over
    total earnings, stores the result in the module-level PE_HS300 and
    persists it to nightly/PE_HS300.pickle.
    """
    global PE_HS300
    with open(os.path.join(CACHE_NIGHTLY, "finance.pickle"), 'rb') as f:
        finance_df = pickle.load(f)
    with open(os.path.join(CACHE_NIGHTLY, "weights.pickle"), 'rb') as f:
        index_df = pickle.load(f)["399300.XSHE"]
    pure_df = pd.merge(index_df, finance_df, left_index=True, right_index=True, how='left')
    # Cap-weighted PE: sum(cap) / sum(cap / pe) == total cap / total earnings.
    earnings = pure_df['circulating_market_cap'] / pure_df["pe_ratio"]
    PE_HS300 = pure_df['circulating_market_cap'].sum() / earnings.sum()
    print("当前市场PE(沪深300) => ", PE_HS300)
    # Persist for other nightly scripts.
    LOG.info("ticker_map dumps to file, PE_HS300.pickle")
    with open(os.path.join(CACHE_PATH, 'nightly', 'PE_HS300.pickle'), 'wb') as f:
        pickle.dump({"HS300": PE_HS300}, f)
def core(name = None, pe = None, roe = None, inc = None):
    """Score a single ticker from its PE, ROE and growth (inc).

    Returns (assess_down, assess_up, score): a fair-PE band scaled by the
    market-wide PE_HS300 relative to a reference PE of 13, and a composite
    score rewarding ROE and band width while penalising a PE above the band.
    The *name* argument is accepted for pass-through from DataFrame rows and
    is not used in the computation.
    """
    growth = (inc * 2.7 - roe) / 100 + 1
    if inc != 0:
        assess_down = roe * growth
        assess_up = roe * growth * growth
    else:
        # Zero-growth case: damp the band with the 0.5 * 1.5 factor and
        # swap which bound carries the squared growth term.
        assess_down = roe * growth * growth * 0.5 * 1.5
        assess_up = roe * growth * 0.5 * 1.5
    # Rescale dynamically by the live market level (reference market PE 13).
    assess_down = assess_down * PE_HS300 / 13
    assess_up = assess_up * PE_HS300 / 13
    # Risk penalty: a PE above the upper bound costs double per point.
    fix_level = assess_up
    if pe < fix_level:
        risk = pe - fix_level
    else:
        risk = (fix_level - pe) * 2
    assess_wave = (assess_up - assess_down) / assess_up * 100
    score = roe * 2 + assess_wave + risk * 2
    return assess_down, assess_up, score
def launch():
    """Build finance_plus.pickle.

    Loads the nightly finance frame, applies the manual overrides from
    special_dict, scores every row with core() and persists the enriched
    frame back to the nightly cache.

    Returns 1 when the finance cache cannot be loaded, otherwise None.
    """
    try:
        with open(os.path.join(CACHE_NIGHTLY, "finance.pickle"), 'rb') as f:
            df = pickle.load(f)
    except Exception:  # was a bare except; no longer traps SystemExit/KeyboardInterrupt
        LOG.warning("Fail to load finance.pickle")
        return 1
    # Add a "changed" column so overridden rows can be flagged in reports.
    df["changed"] = pd.Series("", name="changed", index=df.index)
    # Apply manual overrides: special_dict maps ticker -> [roe, inc, pe_ratio].
    for item in df.index:
        if item in special_dict:
            roe_new, inc_new, pe_new = special_dict[item]
            if roe_new is not None:
                df.at[item, 'roe'] = roe_new
                df.at[item, 'changed'] = " <<roe"
            if inc_new is not None:
                df.at[item, 'inc'] = inc_new
                df.at[item, 'changed'] = " <inc"
            if pe_new is not None:
                # NOTE(review): each override overwrites the marker, so a
                # ticker with several overrides only shows the last one.
                df.at[item, 'pe_ratio'] = pe_new
                df.at[item, 'changed'] = "<<< PE"
    # Score every row; core() returns (assess_down, assess_up, score).
    core_lambda = lambda row: core(row.name, row["pe_ratio"], row["roe"], row["inc"])
    ss = df.apply(core_lambda, axis=1)
    df[["assess_down", "assess_up", "score"]] = ss.apply(pd.Series)
    # Persist the enriched frame.
    LOG.info("save finance_plus")
    with open(os.path.join(CACHE_NIGHTLY, "finance_plus.pickle"), 'wb') as f:
        pickle.dump(df, f)
def show_table():
    """Print the scored watch-list (ticker_list) sorted by score, with
    display names resolved from the code-mapping cache."""
    with open(os.path.join(CACHE_NIGHTLY, "finance_plus.pickle"), 'rb') as f:
        scored = pickle.load(f)
    with open(os.path.join(CACHE_PATH, "code_mapping.pickle"), 'rb') as f:
        mapping = pickle.load(f)
    watch = scored.loc[ticker_list].sort_values(by='score', ascending=False)
    watch["display_name"] = mapping['display_name'][watch.index]
    columns = ["roe", "inc", "pe_ratio", "pe_ratio_lyr",
               "assess_down", "assess_up", "score", "display_name", "changed", ]
    table = watch[columns]
    LOG.info(table)
    # Make pandas align wide (CJK) characters and widen the console output
    # so the table does not wrap.
    pd.set_option('display.unicode.ambiguous_as_wide', True)
    pd.set_option('display.unicode.east_asian_width', True)
    pd.set_option('display.width', 180)
    print(table)
if __name__ == '__main__':
    # Nightly pipeline: refresh the HS300 PE baseline, rebuild the scored
    # finance frame, then print the watch-list table.
    get_baseline_PE()
    launch()
    show_table()
class Node(object):
    """A single binary-tree node holding a value and optional children."""
    value = None
    left_child = None
    right_child = None
    def __init__(self, value, left=None, right=None):
        self.value = value
        if left:
            self.left_child = left
        if right:
            self.right_child = right
    def __str__(self):
        # Bug fix: returning self.value raised TypeError for non-string
        # values (e.g. ints); str() makes the node printable for any value.
        return str(self.value)
    def has_left(self):
        """Return True when a left child is attached."""
        return self.left_child is not None
    def has_right(self):
        """Return True when a right child is attached."""
        return self.right_child is not None
class BinaryTree(object):
    """An (unbalanced) binary search tree.

    Ordering convention: values smaller than a node go to its left,
    larger-or-equal values go to its right.
    """
    root: "Node" = None
    def __init__(self, node: "Node" = None):
        if not self.root and node:
            self.root = node
    def __add__(self, node: "Node", parent: "Node" = None):
        """Insert *node* below *parent* (the root when omitted).

        Bug fix: insertion used to send smaller-or-equal values to the
        RIGHT while search() looks for smaller values on the LEFT; both
        now agree on the standard BST ordering.
        """
        if not self.root:
            self.__init__(node=node)
        else:
            if parent:
                if node.value < parent.value:
                    if parent.has_left():
                        self.__add__(node=node, parent=parent.left_child)
                    else:
                        parent.left_child = node
                else:
                    if parent.has_right():
                        self.__add__(node=node, parent=parent.right_child)
                    else:
                        parent.right_child = node
            else:
                self.__add__(node=node, parent=self.root)
    def search_back(self, number, node: "Node", level_count):
        """Recursive lookup helper.

        Returns (depth, True) when *number* is found, False otherwise.
        Bug fix: the recursive results were previously dropped (missing
        ``return``), so every non-root hit answered None.
        """
        if number == node.value:
            return level_count, True
        if number < node.value:
            if node.has_left():
                return self.search_back(number=number, node=node.left_child, level_count=level_count + 1)
            return False
        if node.has_right():
            return self.search_back(number=number, node=node.right_child, level_count=level_count + 1)
        return False
    def search(self, number):
        """Return (depth, True) for *number*, or False when absent."""
        return self.search_back(number=number, node=self.root, level_count=0)
    def print_level(self, level_count, node: "Node", result: list):
        """Collect into *result* every node exactly *level_count* levels
        below *node*."""
        if not node:
            return
        if level_count == 0:
            result.append(node)
            return  # no need to descend past the requested level
        self.print_level(level_count=level_count - 1, node=node.left_child, result=result)
        self.print_level(level_count=level_count - 1, node=node.right_child, result=result)
    def print_tree(self, result: list, node: "Node"):
        """Pre-order traversal collecting Node objects into *result*.

        Bug fix: the right subtree was only visited when there was no left
        child (``elif``); both sides are now traversed.
        """
        result.append(node)
        if node.has_left():
            self.print_tree(result=result, node=node.left_child)
        if node.has_right():
            self.print_tree(result=result, node=node.right_child)
    def height(self, node: "Node"):
        """Return the height of the subtree rooted at *node* (a leaf has
        height 0; a missing node contributes 0 as in the original)."""
        if not node:
            return 0
        l_height = self.height(node.left_child) if node.has_left() else -1
        r_height = self.height(node.right_child) if node.has_right() else -1
        return max(l_height, r_height) + 1
    def to_array_values(self, result: list, node: "Node"):
        """Append the subtree's values to *result* in pre-order.

        Bug fix: both children are visited now (was ``elif``, which skipped
        the right subtree whenever a left child existed).
        """
        result.append(node.value)
        if node.has_left():
            self.to_array_values(result=result, node=node.left_child)
        if node.has_right():
            self.to_array_values(result=result, node=node.right_child)
    def to_array_nodes(self, result: list, node: "Node"):
        """Append the subtree's Node objects to *result* in pre-order.

        Bug fix: the recursion previously called to_array_values (collecting
        values, not nodes) and skipped the right subtree via ``elif``.
        """
        result.append(node)
        if node.has_left():
            self.to_array_nodes(result=result, node=node.left_child)
        if node.has_right():
            self.to_array_nodes(result=result, node=node.right_child)
def join_trees(bst_1: BinaryTree, bst_2: BinaryTree):
    """Merge two trees: dump both into a single value list (first tree's
    values first), then re-insert every value into a fresh BinaryTree and
    return it."""
    values = []
    bst_1.to_array_values(values, bst_1.root)
    bst_2.to_array_values(values, bst_2.root)
    merged = BinaryTree()
    for value in values:
        merged.__add__(node=Node(value))
    return merged
| 3.875 | 4 |
sample/sample.py | eaybek/livequery | 0 | 12763620 | <reponame>eaybek/livequery<filename>sample/sample.py
from livequery.livequery import Livequery
class Livequery(object):
    # NOTE(review): this empty stub shadows the Livequery class imported
    # from livequery.livequery above; any use after this line gets the
    # stub, leaving the import effectively unused.
    pass
| 1.21875 | 1 |
tests/asdf_tests/test_asdf_aws_schema.py | vhirtham/weldx | 0 | 12763621 | """Test ASDF serialization of AWS schema definitions."""
import pytest
# weld design -----------------------------------------------------------------
from weldx.asdf.tags.weldx.aws.design.base_metal import BaseMetal
from weldx.asdf.tags.weldx.aws.design.connection import Connection
# weld design -----------------------------------------------------------------
from weldx.asdf.tags.weldx.aws.design.joint_penetration import JointPenetration
from weldx.asdf.tags.weldx.aws.design.sub_assembly import SubAssembly
from weldx.asdf.tags.weldx.aws.design.weld_details import WeldDetails
from weldx.asdf.tags.weldx.aws.design.weldment import Weldment
from weldx.asdf.tags.weldx.aws.design.workpiece import Workpiece
# welding process -----------------------------------------------------------------
from weldx.asdf.tags.weldx.aws.process.arc_welding_process import ArcWeldingProcess
from weldx.asdf.tags.weldx.aws.process.gas_component import GasComponent
from weldx.asdf.tags.weldx.aws.process.shielding_gas_for_procedure import (
ShieldingGasForProcedure,
)
from weldx.asdf.tags.weldx.aws.process.shielding_gas_type import ShieldingGasType
from weldx.asdf.utils import _write_read_buffer
from weldx.constants import WELDX_QUANTITY as Q_
# iso groove -----------------------------------------------------------------
from weldx.welding.groove.iso_9692_1 import get_groove
def test_aws_example():
    """Test validity of current AWS Data Dictionary standard implementation.

    Builds a complete welding description (process + shielding gas, two
    groove designs, joint/weld details, sub-assemblies) and round-trips the
    resulting tree through an ASDF buffer; success means every object
    serialized and deserialized against its schema.
    """
    # welding process -----------------------------------------------------------------
    # 82/18 argon-CO2 mix, torch-delivered at 20 l/min
    gas_comp = [
        GasComponent("argon", Q_(82, "percent")),
        GasComponent("carbon dioxide", Q_(18, "percent")),
    ]
    gas_type = ShieldingGasType(gas_component=gas_comp, common_name="SG")
    gas_for_procedure = ShieldingGasForProcedure(
        use_torch_shielding_gas=True,
        torch_shielding_gas=gas_type,
        torch_shielding_gas_flowrate=Q_(20, "l / min"),
    )
    arc_welding_process = ArcWeldingProcess("GMAW")
    with pytest.raises(ValueError):  # test for non viable process string
        ArcWeldingProcess("NON_EXISTENT_PROCESS")
    process = {
        "arc_welding_process": arc_welding_process,
        "shielding_gas": gas_for_procedure,
    }
    # weld design -----------------------------------------------------------------
    v_groove = get_groove(
        groove_type="VGroove",
        workpiece_thickness=Q_(9, "mm"),
        groove_angle=Q_(50, "deg"),
        root_face=Q_(4, "mm"),
        root_gap=Q_(2, "mm"),
    )
    u_groove = get_groove(
        groove_type="UGroove",
        workpiece_thickness=Q_(15, "mm"),
        bevel_angle=Q_(9, "deg"),
        bevel_radius=Q_(6, "mm"),
        root_face=Q_(3, "mm"),
        root_gap=Q_(1, "mm"),
    )
    joint_penetration = JointPenetration(
        complete_or_partial="completePenetration", root_penetration=Q_(1.0, "mm")
    )
    # one weld-details entry per groove type; joint_penetration is shared
    weld_details = WeldDetails(
        joint_design=v_groove, weld_sizes=Q_(320, "mm"), number_of_passes=1
    )
    weld_details2 = WeldDetails(
        joint_design=u_groove, weld_sizes=Q_(320, "mm"), number_of_passes=1
    )
    connection1 = Connection(
        joint_type="butt_joint",
        weld_type="singleVGroove",
        joint_penetration=joint_penetration,
        weld_details=weld_details,
    )
    connection2 = Connection(
        joint_type="butt_joint",
        weld_type="singleUGroove",
        joint_penetration=joint_penetration,
        weld_details=weld_details2,
    )
    workpieces = [Workpiece(geometry="V-Groove")]
    sub_assembly = [
        SubAssembly(workpiece=workpieces, connection=connection1),
        SubAssembly(workpiece=workpieces, connection=connection2),
    ]
    weldment = Weldment(sub_assembly)
    base_metal = BaseMetal("steel", "plate", Q_(10.3, "mm"))
    tree = dict(process=process, weldment=weldment, base_metal=base_metal)
    # round-trip through an in-memory ASDF buffer
    data = _write_read_buffer(tree)
    assert isinstance(data, dict)
| 1.898438 | 2 |
lab_exercises/le_04-2020Fall/lab_exercise_04.py | arwhyte/SI506-practice | 12 | 12763622 | <reponame>arwhyte/SI506-practice
# START LAB EXERCISE 04
print('Lab Exercise 04 \n')
# SETUP
# Each entry is a single "City|State" string; split on "|" to separate the
# city from the two-letter state code.
city_state = ["Detroit|MI", "Philadelphia|PA", "Hollywood|CA",
"Oakland|CA", "Boston|MA", "Atlanta|GA",
"Phoenix|AZ", "Birmingham|AL", "Houston|TX", "Tampa|FL"]
# END SETUP
# PROBLEM 1.0 (5 Points)
# PROBLEM 2.0 (5 Points)
# PROBLEM 3.0 (10 Points)
# END LAB EXERCISE
regex.py | deb17/regex | 0 | 12763623 | '''This module performs operations related to the re module.
Its functions are:
run_re - Run the requested re and format the input data with
highlighting.
check_re - Check if the re is valid (can be compiled).
clean_data - Remove all span tags from the input test data.
get_flags_value - Calculate the binary OR of the flags.
get_modified_data - Format the input test data with highlighting.
modify_input - Helper function to copy data from input to output.
format_result - Format result part of the page for proper display.
NOTE - In this webapp, I have decided to handle the highlighting myself
instead of using a third-party jquery plugin which is also an option.
One issue with jquery plugins is that they change the style of the
textarea, so a number of changes are necessary to make it match
bootstrap style.
TO-DO - Change every 2 spaces in testdiv and results part to a space and
an nbsp . Use `word-break: break-word` and - for firefox -
`overflow-wrap: break-word` in the css. Replace ' <' with
' <' to preserve spaces - may not be required in the
results part.
'''
import re
import html
def run_re(pattern, flags, testdata, modify='Y'):
    '''Run the search method on the compiled re and format the test
    data with highlighting.
    1. Prepare the test data coming from the content editable div for
    searching. Remove div, br and span tags and introduce newlines.
    Replace non-breaking space with space character.
    2. Compile and run the search method. Format the result by escaping
    <, >, &, quote characters and introducing non-breaking spaces.
    3. Run the finditer method on the regex to get all matches and
    format the data with span tags to show highlighting.
    Returns (result, mod_data): result is the string 'error' for an
    invalid pattern, (None, None, None) for no match, otherwise
    (group, groups or None, groupdict or None); mod_data is the
    highlighted HTML (or the original input when modify != 'Y').
    '''
    # Keep the untouched input around: it is echoed back on error or when
    # highlighting is disabled.
    origdata = testdata
    testdata = re.sub(r'<span(.*?)>', '', testdata)
    testdata = testdata.replace('</span>', '')
    if testdata.startswith('<div>'):
        testdata = testdata[5:]
    # Firefox introduces <br>. <br> is also present with <div> on blank
    # lines.
    testdata = testdata.replace('<br>', '')
    testdata = testdata.replace('<div>', '\n')
    testdata = testdata.replace('</div>', '')
    # testdata = re.sub(r'<div>(.*?)</div>', r'\n\1', testdata)
    # The first argument below is a literal non-breaking space (U+00A0).
    testdata = testdata.replace(' ', ' ')
    testdata = html.unescape(testdata)
    calc_val = get_flags_value(flags)
    try:
        regex = re.compile(pattern, calc_val)
    except Exception:
        # Invalid pattern: signal with the sentinel string 'error' and
        # echo the input back unmodified.
        result = 'error'
        mod_data = origdata
    else:
        match = regex.search(testdata)
        if match is None:
            result = (None, None, None)
        else:
            group = match.group().replace('\n', '\\n')
            group = format_result(group)
            groups = tuple(format_result(grp) for grp in match.groups())
            groupdict = {k: format_result(v) for k, v
                         in match.groupdict().items()}
            result = (group,
                      groups or None,
                      groupdict or None)
        if modify == 'Y':
            # Highlight every match; escape first so the span tags we add
            # are the only markup in the output.
            it = regex.finditer(testdata)
            testdata = html.escape(testdata, quote=False)
            mod_data = get_modified_data(testdata, it)
        else:
            mod_data = origdata
    return result, mod_data
def check_re(pattern, flags):
    """Return True when *pattern* compiles under the given flag string.

    Any compilation failure (re.error, bad flag types, ...) yields False;
    the compiled object itself is discarded.
    """
    calc_val = get_flags_value(flags)
    try:
        # The binding to a variable was unused; compile only for validation.
        re.compile(pattern, calc_val)
    except Exception:
        return False
    return True
def clean_data(data):
    '''Strip every <span ...> opening tag and </span> closing tag from
    *data* — both the highlight spans added by this module and those the
    contenteditable div inserts on its own — leaving the text content
    untouched.
    '''
    without_open_tags = re.sub(r'<span(.*?)>', '', data)
    return without_open_tags.replace('</span>', '')
def get_flags_value(flags):
    '''Translate the 5-character flag string into a bitwise OR of re
    flags.  Position i contributes its flag only when it holds the
    matching letter: i(gnorecase), s (dotall), x (verbose), a(scii),
    m(ultiline).
    LOCALE is deliberately unsupported: the official Regular Expressions
    HOWTO discourages it, and it would affect the server rather than the
    client.
    '''
    table = (('i', re.I), ('s', re.S), ('x', re.X), ('a', re.A), ('m', re.M))
    val = 0
    for position, (letter, flag) in enumerate(table):
        if flags[position] == letter:
            val |= flag
    return val
def get_modified_data(data, it):
    '''Format the test data string used in the re search with HTML tags.
    1. Read the input (in data variable) character by character. See
    docstring for modify_input function.
    2. If match object starts at a character, introduce a span
    tag with class hilite in the output (the modified variable).
    3. If match object ends at a character, introduce closing span tag.
    4. Take care to close the span tag and start a new span if a newline
    is encountered when a span tag has not yet been closed.
    5. Replace all spaces by non-breaking spaces.
    6. When iterator is exhausted, copy remaining input to output.
    7. Introduce opening and closing div tags where there are newlines.
    '''
    # i tracks positions in the *unescaped* search string (match spans),
    # cnt tracks the cursor in the *escaped* data being copied out.
    modified = ''
    cnt = 0
    i = 0
    starttag = False
    try:
        mo = next(it)
        while cnt < len(data):
            if i == mo.span()[0]:
                # A match starts here: open a highlight span.
                if data[cnt] != '\n':
                    modified += '<span class="hilite">'
                    modified, cnt = modify_input(modified, data, cnt)
                else:
                    modified += '\n'
                    modified += '<span class="hilite">'
                    cnt += 1
                starttag = True
            elif i == mo.span()[1]:
                # Current match ends here: close the span and advance to
                # the next match; back up i when the next match is adjacent.
                modified += '</span>'
                starttag = False
                mo = next(it)
                if i == mo.span()[0]:
                    i -= 1
                else:
                    modified, cnt = modify_input(modified, data, cnt)
            elif starttag and data[cnt] == '\n':
                # Newline inside a highlight: split the span across lines.
                modified += '</span>\n<span class="hilite">'
                cnt += 1
            else:
                modified, cnt = modify_input(modified, data, cnt)
            i += 1
        if starttag: modified += '</span>'
    except StopIteration:
        # No further matches: copy the tail verbatim, turning spaces into
        # literal non-breaking spaces (the second argument is U+00A0).
        modified += data[cnt:].replace(' ', ' ')
    # Re-introduce the contenteditable div structure: one <div> per line.
    output = ''
    first = True
    for c in modified:
        if c == '\n' and first:
            output += '<div>'
            first = False
        elif c == '\n':
            output += '</div><div>'
        else:
            output += c
    if not first:
        output += '</div>'
    # introduce br tags to effect blank lines.
    output = output.replace('<div></div>', '<div><br></div>')
    output = output.replace('<div><span class="hilite"></span></div>',
                            '<div><span class="hilite"><br></span></div>')
    if output.startswith('<div>'):
        output = '<div><br></div>' + output
    return output
def modify_input(modified, data, cnt):
    '''Copy one logical character from *data* at index *cnt* onto
    *modified* and return (grown_string, next_index).
    Escaped entities (&lt; &gt; &amp;) are copied as a whole so the
    pre-escaped text is never split mid-entity; a plain space becomes a
    literal non-breaking space (U+00A0).
    '''
    entities = ('&lt;', '&gt;', '&amp;')
    # &lt;/&gt; are 4 characters, &amp; is 5 — try both widths in order.
    for width in (4, 5):
        token = data[cnt:cnt + width]
        if token in entities:
            return modified + token, cnt + width
    ch = data[cnt]
    if ch == ' ':
        ch = '\xa0'
    return modified + ch, cnt + 1
def format_result(data):
    '''Format result (whole match, groups and group dict) for displaying
    on a webpage: HTML-escape <, > and & (quotes untouched) and turn
    spaces into literal non-breaking spaces.  None and the empty string
    pass through unchanged.
    '''
    if not data:
        return data  # data may be None (unmatched optional group)
    escaped = html.escape(data, quote=False)
    return escaped.replace(' ', '\xa0')
| 3.28125 | 3 |
bloom/editor/map_objects/sector_collection.py | thomasrogers03/bloom | 9 | 12763624 | # Copyright 2020 <NAME>
# SPDX-License-Identifier: Apache-2.0
import typing
from panda3d import bullet, core
from ... import audio, constants, editor, game_map, map_data, seq
from .. import (
event_grouping,
marker_constants,
plane,
ror_constants,
sector_geometry,
sprite_find_sector,
undo_stack,
)
from . import empty_object, geometry_highlight, sprite, wall
from .drawing import sector as drawing_sector
from .sector import EditorSector
class SectorCollection:
    """Owns every EditorSector of a loaded map.

    Builds the editor-side sectors from a game_map.Map, wires up ROR
    (room-over-room) links and event groupings, supports creating and
    destroying sectors with undo support, and serializes everything back
    into Blood map structures via prepare_to_persist().
    """
    def __init__(
        self,
        map_to_load: game_map.Map,
        audio_manager: audio.Manager,
        seq_manager: seq.Manager,
        geometry_factory: sector_geometry.SectorGeometryFactory,
        suggest_sky_picnum: typing.Callable[[int], int],
        undos: undo_stack.UndoStack,
    ):
        """Load every sector, wall and sprite of *map_to_load* into editor
        objects and register them with the event-grouping collection."""
        self._audio_manager = audio_manager
        self._seq_manager = seq_manager
        self._geometry_factory = geometry_factory
        self._suggest_sky_picnum = suggest_sky_picnum
        self._event_groupings = event_grouping.EventGroupingCollection()
        self._undo_stack = undos
        self._sectors: typing.List[EditorSector] = []
        sprite_mapping: typing.Dict[int, sprite.EditorSprite] = {}
        marker_sprite_mapping: typing.Dict[int, sprite.EditorSprite] = {}
        for sector_index, map_sector in enumerate(map_to_load.sectors):
            self.new_sector(map_sector).load(
                map_to_load, sector_index, sprite_mapping, marker_sprite_mapping
            )
        # ROR link sprites pair an upper and a lower sector through their
        # data1 id; index them here so sectors can be stitched after load.
        lower_sectors: typing.Dict[int, EditorSector] = {}
        upper_sectors: typing.Dict[int, EditorSector] = {}
        for blood_sprite in map_to_load.sprites:
            if blood_sprite.sprite.tags[0] in ror_constants.LOWER_LINK_TYPES:
                lower_sectors[blood_sprite.data.data1] = self._sectors[
                    blood_sprite.sprite.sector_index
                ]
            elif blood_sprite.sprite.tags[0] in ror_constants.UPPER_LINK_TYPES:
                upper_sectors[blood_sprite.data.data1] = self._sectors[
                    blood_sprite.sprite.sector_index
                ]
        # Second pass: resolve cross-sector references, then hand every
        # object to the event-grouping collection.
        all_objects: typing.List[empty_object.EmptyObject] = []
        for editor_sector in self._sectors:
            editor_sector.setup_walls_and_sprites_after_load(
                self._sectors,
                map_to_load,
                lower_sectors,
                upper_sectors,
                sprite_mapping,
                marker_sprite_mapping,
            )
            all_objects.append(editor_sector)
            for editor_wall in editor_sector.walls:
                all_objects.append(editor_wall)
            for editor_sprite in editor_sector.sprites:
                all_objects.append(editor_sprite)
        self._event_groupings.load(all_objects)
    @property
    def sectors(self) -> typing.List[EditorSector]:
        """The live list of editor sectors (mutated by create/destroy)."""
        return self._sectors
    @property
    def event_groupings(self):
        """The EventGroupingCollection built from this map's objects."""
        return self._event_groupings
    @property
    def undos(self):
        """The undo stack shared with every sector."""
        return self._undo_stack
    def destroy_sector(self, sector_to_destroy: EditorSector):
        """Tear down *sector_to_destroy* and drop it from the collection."""
        sector_to_destroy.destroy()
        self.sectors.remove(sector_to_destroy)
    def create_sector(self, template: EditorSector):
        """Create a new sector copying *template*'s build-sector fields
        (with the type tag cleared) and fresh Blood data."""
        new_build_sector = template.sector.sector.copy()
        new_build_sector.tags[0] = 0
        new_blood_sector = map_data.sector.Sector(
            sector=new_build_sector, data=map_data.sector.BloodSectorData()
        )
        return self.new_sector(new_blood_sector)
    def create_empty_sector(self):
        """Create a sector from default-constructed Blood sector data."""
        return self.new_sector(map_data.sector.Sector())
    def new_sector(self, blood_sector: game_map.sector.Sector):
        """Wrap *blood_sector* in an EditorSector, append it and record an
        undoable "Add Sector" operation on the undo stack."""
        index = len(self._sectors)
        new_sector = EditorSector(
            blood_sector,
            str(index),
            self._audio_manager,
            self._seq_manager,
            self._geometry_factory,
            self._suggest_sky_picnum,
            self._undo_stack,
        )
        def _undo():
            self.destroy_sector(new_sector)
        def _redo():
            new_sector.undestroy()
            self._sectors.append(new_sector)
        # apply() performs the initial append via _redo.
        operation = undo_stack.SimpleUndoableOperation("Add Sector", _undo, _redo)
        operation.apply()
        self._undo_stack.add_operation(operation)
        return new_sector
    def prepare_to_persist(
        self,
        find_sector: typing.Callable[
            ["editor.sector.EditorSector", core.Point3], "editor.sector.EditorSector"
        ],
        builder_position: core.Point3,
    ):
        """Serialize the whole collection back to Blood map structures.

        Returns (blood_sectors, blood_walls, blood_sprites,
        builder_sector_index), where the last value is the index of the
        sector containing *builder_position* (or -1 when none is found by
        *find_sector*).
        """
        blood_sectors: typing.List[map_data.sector.Sector] = []
        blood_walls: typing.List[map_data.wall.Wall] = []
        blood_sprites: typing.List[map_data.sprite.Sprite] = []
        sector_index_mapping: typing.Dict[EditorSector, int] = {}
        wall_index_mapping: typing.Dict[wall.EditorWall, int] = {}
        sprite_index_mapping: typing.Dict[sprite.EditorSprite, int] = {}
        marker_id = marker_constants.START_ID
        # Clear every object's tx/rx ids; the event groupings re-assign
        # them in prepare_to_persist() below.
        for editor_sector in self._sectors:
            self._reset_tx_rx(editor_sector)
            for editor_sprite in editor_sector.sprites:
                self._reset_tx_rx(editor_sprite)
            for editor_wall in editor_sector.walls:
                self._reset_tx_rx(editor_wall)
        self._event_groupings.prepare_to_persist()
        # First pass: assign contiguous sector/wall/sprite indices and
        # unique marker ids (stored in the marker's velocity_x field).
        for sector_index, editor_sector in enumerate(self._sectors):
            sector_index_mapping[editor_sector] = sector_index
            for editor_wall in editor_sector.walls:
                wall_index_mapping[editor_wall] = len(wall_index_mapping)
            for editor_sprite in editor_sector.sprites:
                sprite_index_mapping[editor_sprite] = len(sprite_index_mapping)
            for editor_sprite in editor_sector.markers:
                if editor_sprite is not None:
                    editor_sprite.sprite.sprite.velocity_x = marker_id
                    sprite_index_mapping[editor_sprite] = len(sprite_index_mapping)
                    marker_id += 1
        # Second pass: emit walls, sprites, markers and sectors.
        sector_index_map: typing.Dict[EditorSector, int] = {}
        for editor_sector in self._sectors:
            for editor_wall in editor_sector.walls:
                blood_wall = editor_wall.prepare_to_persist(
                    sector_index_mapping, wall_index_mapping
                )
                blood_walls.append(blood_wall)
            for editor_sprite in editor_sector.sprites:
                sprite_find_sector.SpriteFindSector(
                    editor_sprite, self._sectors
                ).update_sector()
                blood_sprite = editor_sprite.prepare_to_persist(sector_index_mapping)
                blood_sprites.append(blood_sprite)
            markers = [-1, -1]
            for marker_index, editor_marker in enumerate(editor_sector.markers):
                if editor_marker is not None:
                    blood_sprite = editor_marker.prepare_to_persist(
                        sector_index_mapping
                    )
                    markers[marker_index] = blood_sprite.sprite.velocity_x
                    blood_sprites.append(blood_sprite)
            blood_sector = editor_sector.prepare_to_persist(wall_index_mapping)
            blood_sector.data.markers = markers
            sector_index_map[editor_sector] = len(blood_sectors)
            blood_sectors.append(blood_sector)
        # Emit paired upper/lower ROR link sprites for every sector that
        # has a sector below its floor; data1 carries the shared link id.
        ror_data = 0
        for editor_sector in self._sectors:
            if editor_sector.sector_below_floor is not None:
                ror_data += 1
                sector_index = sector_index_map[editor_sector]
                below_sector_index = sector_index_map[editor_sector.sector_below_floor]
                blood_sprite = map_data.sprite.Sprite.new()
                above_point = editor_sector.min_point()
                blood_sprite.sprite.position_x = int(above_point.x)
                blood_sprite.sprite.position_y = int(above_point.y)
                blood_sprite.sprite.position_z = editor.to_build_height(
                    editor_sector.floor_z
                )
                blood_sprite.sprite.tags[0] = ror_constants.UPPER_TAG_MAPPING[
                    editor_sector.ror_type
                ]
                blood_sprite.sprite.sector_index = sector_index
                blood_sprite.data.data1 = ror_data
                blood_sprites.append(blood_sprite)
                blood_sprite = map_data.sprite.Sprite.new()
                below_point = editor_sector.sector_below_floor.min_point()
                blood_sprite.sprite.position_x = int(below_point.x)
                blood_sprite.sprite.position_y = int(below_point.y)
                blood_sprite.sprite.position_z = editor.to_build_height(
                    editor_sector.sector_below_floor.ceiling_z
                )
                blood_sprite.sprite.tags[0] = ror_constants.LOWER_TAG_MAPPING[
                    editor_sector.ror_type
                ]
                blood_sprite.sprite.sector_index = below_sector_index
                blood_sprite.data.data1 = ror_data
                blood_sprites.append(blood_sprite)
        builder_sector = find_sector(None, builder_position)
        if builder_sector is not None:
            builder_sector_index = sector_index_mapping[builder_sector]
        else:
            builder_sector_index = -1
        return blood_sectors, blood_walls, blood_sprites, builder_sector_index
    @staticmethod
    def _reset_tx_rx(map_object: empty_object.EmptyObject):
        """Zero the transmit/receive channel ids on *map_object*'s data."""
        data = map_object.get_data()
        data.tx_id = 0
        data.rx_id = 0
| 1.90625 | 2 |
thrift/__init__.py | PythonRebirth/SB | 1 | 12763625 | __all__ = ['unverting', 'Boxup']
| 1.179688 | 1 |
Python-ML/PP04/code/PCA.py | JasonFil/Python-ML | 4 | 12763626 | <gh_stars>1-10
'''
Created on Nov 29, 2012
@author: jason
'''
from inspect import stack
import pandas as pd
import numpy as np
import os
from util.mlExceptions import DatasetError, LogicalError
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import eigs
from numpy.linalg import eig
# Global constant for current function name
# NOTE(review): inspect.stack()[0][3] is evaluated once at import time, so
# this holds '<module>', not the name of whichever function later raises —
# every error message below therefore reports '<module>'.
CURR_FUNC_NAME = stack()[0][3]
def pca(df, k, axis = 0):
    """
    This PCA implementation is geared towards solving question 1 of the programming assignment.
    Effectively, the df array is constrained to contain only three columns: userid, movieid and
    rating. We will then create a sparse |unique_user_id| X |unique_movie_id| matrix (or the other
    way round, dependent on the value of "axis", which we will then embed in k dimensions.
    @param df: a two-dimensional pandas DataFrame with columns userid, itemid and rating
    @param axis: which axis to treat as examples (0 or 1)
    @param k: number of dimensions in embedding
    @return numpy matrix of shape (m,k) where m is the number of unique userids in df when axis=0
        and m is the number of unique itemids in df when axis=1
    @raise DatasetError when the dataset provided is None or empty
    @raise LogicalError when axis is neither 0 nor 1, or k <= 0
    """
    # NOTE(review): Python 2 only — old-style `raise E, msg` and `print`
    # statements below will not parse on Python 3.
    # Sanity checking
    if axis not in [0, 1]:
        raise LogicalError, "Method %s: \"axis\" variable should be either 0 or 1 (provided: %s)." %(CURR_FUNC_NAME, str(axis))
    if k <= 0 or not isinstance(k, int):
        raise LogicalError, "Method %s: number k of embedding dimensions should be a positive integer (provided: %s)." %(CURR_FUNC_NAME, str(k))
    if df is None or df.shape[0] == 0 or df.shape[1] == 0:
        raise DatasetError, "Method %s: empty dataset provided." %(CURR_FUNC_NAME)
    if len(df.columns.values) != 3:
        raise DatasetError, "Method %s: the dataframe provided should have exactly 3 columns." %(CURR_FUNC_NAME)
    # NOTE(review): the first test checks 'userid' as a substring of the
    # FIRST column name only (columns.values[0]); the other two test
    # membership in the whole column array — likely meant df.columns.values
    # for all three.
    if 'userid' not in df.columns.values[0] or 'itemid' not in df.columns.values or 'rating' not in df.columns.values:
        raise DatasetError, "Method %s: the dataframe provided should have 3 columns named \"userid\", \"itemid\" and \"rating\"." %(CURR_FUNC_NAME)
    # Load the dataframe values in a hash. It will make life easier.
    ratingsHash = {}
    for _row_indx, (userid, itemid, rating) in df.iterrows():
        ratingsHash[(userid, itemid)] = rating
    # We now need to make our m x n sparse array.
    rowIndex = 'userid' if axis == 0 else 'itemid'
    columnIndex = 'itemid' if axis == 0 else 'userid'
    uniqueRows = df[rowIndex].unique()
    uniqueCols = df[columnIndex].unique()
    sparseArr = np.zeros((len(uniqueRows), len(uniqueCols))) # zerofill initially
    for i in range(len(uniqueRows)):
        for j in range(len(uniqueCols)):
            if (uniqueRows[i], uniqueCols[j]) in ratingsHash:
                sparseArr[i][j] = ratingsHash[(uniqueRows[i], uniqueCols[j])]
    # Compute the covariance matrix
    print "sparseArr shape: " + str(sparseArr.shape)
    covMat = np.cov(sparseArr.T)
    # A compressed representation is needed because we need to center sparse data.
    # NOTE(review): the loop centers ROW c using the mean of COLUMN c
    # (getcol), and only touches nonzero entries; the dimensions only line
    # up when the matrix is square — confirm this is intended.
    csr_rep = lil_matrix(sparseArr).tocsr()
    for c in range(csr_rep.shape[axis]):
        sparseArr[c, :][sparseArr[c, :].nonzero()] -= np.mean(csr_rep.getcol(c).data)
    # Find eigenvalues, compute and return k-dimensional embedding
    print "covMat shape: " + str(covMat.shape)
    eigenVals, eigenVecs = eigs(covMat, k)
    # Re-arrange eigenvectors so that you get the most significant components first.
    eigenVecs = eigenVecs[:, np.argsort(-eigenVals)]
    return sparseArr.dot(eigenVecs)
def pca2(df, k, axis = 0):
    """
    This PCA implementation is geared towards solving question 1 of the programming assignment.
    Effectively, the df array is constrained to contain only three columns: userid, movieid and
    rating. We will then create a sparse |unique_user_id| X |unique_movie_id| matrix (or the other
    way round, dependent on the value of "axis", which we will then embed in k dimensions.
    @param df: a two-dimensional pandas DataFrame with columns userid, itemid and rating
    @param axis: which axis to treat as examples (0 or 1)
    @param k: number of dimensions in embedding
    @return numpy matrix of shape (m,k) where m is the number of unique userids in df when axis=0
        and m is the number of unique itemids in df when axis=1
    @raise DatasetError when the dataset provided is None or empty
    @raise LogicalError when axis is neither 0 nor 1, or k <= 0
    """
    # NOTE(review): this function duplicates pca() line for line; the only
    # difference is the eigendecomposition at the bottom (dense numpy.eig
    # over all eigenpairs, then an explicit top-k slice, instead of the
    # sparse scipy eigs(covMat, k)). Consider consolidating the two.
    # Sanity checking
    if axis not in [0, 1]:
        raise LogicalError, "Method %s: \"axis\" variable should be either 0 or 1 (provided: %s)." %(CURR_FUNC_NAME, str(axis))
    if k <= 0 or not isinstance(k, int):
        raise LogicalError, "Method %s: number k of embedding dimensions should be a positive integer (provided: %s)." %(CURR_FUNC_NAME, str(k))
    if df is None or df.shape[0] == 0 or df.shape[1] == 0:
        raise DatasetError, "Method %s: empty dataset provided." %(CURR_FUNC_NAME)
    if len(df.columns.values) != 3:
        raise DatasetError, "Method %s: the dataframe provided should have exactly 3 columns." %(CURR_FUNC_NAME)
    # NOTE(review): same columns.values[0] substring quirk as in pca().
    if 'userid' not in df.columns.values[0] or 'itemid' not in df.columns.values or 'rating' not in df.columns.values:
        raise DatasetError, "Method %s: the dataframe provided should have 3 columns named \"userid\", \"itemid\" and \"rating\"." %(CURR_FUNC_NAME)
    # Load the dataframe values in a hash. It will make life easier.
    ratingsHash = {}
    for _row_indx, (userid, itemid, rating) in df.iterrows():
        ratingsHash[(userid, itemid)] = rating
    # We now need to make our m x n sparse array.
    rowIndex = 'userid' if axis == 0 else 'itemid'
    columnIndex = 'itemid' if axis == 0 else 'userid'
    uniqueRows = df[rowIndex].unique()
    uniqueCols = df[columnIndex].unique()
    sparseArr = np.zeros((len(uniqueRows), len(uniqueCols))) # zerofill initially
    for i in range(len(uniqueRows)):
        for j in range(len(uniqueCols)):
            if (uniqueRows[i], uniqueCols[j]) in ratingsHash:
                sparseArr[i][j] = ratingsHash[(uniqueRows[i], uniqueCols[j])]
    # Compute the covariance matrix
    print "sparseArr shape: " + str(sparseArr.shape)
    covMat = np.cov(sparseArr.T)
    # A compressed representation is needed because we need to center sparse data.
    csr_rep = lil_matrix(sparseArr).tocsr()
    for c in range(csr_rep.shape[axis]):
        sparseArr[c, :][sparseArr[c, :].nonzero()] -= np.mean(csr_rep.getcol(c).data)
    # Find eigenvalues, compute and return k-dimensional embedding
    print "covMat shape: " + str(covMat.shape)
    eigenVals, eigenVecs = eig(covMat)
    # Re-arrange eigenvectors so that you get the most significant components first.
    eigenVecs = eigenVecs[:, np.argsort(-eigenVals)][:, :k]
    return sparseArr.dot(eigenVecs)
if __name__ == '__main__':
    # Smoke-run both implementations on the cached training ratings;
    # the returned embeddings (taken as .real) are discarded.
    os.chdir('../')
    ratings_train = pd.load('proc_data/ratings_train.pda')
    pca(ratings_train[['userid', 'itemid', 'rating']], 100, 0).real
    pca2(ratings_train[['userid', 'itemid', 'rating']], 100, 1).real
enaml/qt/qt_html.py | xtuzy/enaml | 1,080 | 12763627 | <filename>enaml/qt/qt_html.py
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.html import ProxyHtml
from .QtWidgets import QTextEdit
from .qt_control import QtControl
class QtHtml(QtControl, ProxyHtml):
    """ A Qt implementation of an Enaml ProxyHtml widget.

    The HTML source is rendered by a read-only QTextEdit.
    """
    #: A reference to the widget created by the proxy.
    widget = Typed(QTextEdit)

    #--------------------------------------------------------------------------
    # Initialization API
    #--------------------------------------------------------------------------
    def create_widget(self):
        """ Create the underlying html widget.

        """
        editor = QTextEdit(self.parent_widget())
        editor.setReadOnly(True)
        self.widget = editor

    def init_widget(self):
        """ Initialize the underlying widget.

        """
        super(QtHtml, self).init_widget()
        self.set_source(self.declaration.source)

    #--------------------------------------------------------------------------
    # ProxyHtml API
    #--------------------------------------------------------------------------
    def set_source(self, source):
        """ Set the html source rendered by the widget.

        """
        self.widget.setHtml(source)
| 1.890625 | 2 |
tests/settings.py | jnsdrtlf/django-feather | 17 | 12763628 | <filename>tests/settings.py
# Minimal Django settings module used only to run the django-feather tests.
import os
# Directory containing this settings module.
BASE_DIR = os.path.dirname(__file__)
# Core contrib apps required by the test suite, plus the package under test.
INSTALLED_APPS = (
    "django.contrib.auth",
    "django.contrib.sessions",
    "django.contrib.contenttypes",
    "django.contrib.admin",
    "django_feather",
)
# Test-only value; never used outside the test run.
SECRET_KEY = "django-feather-test"
# In-memory SQLite keeps the test run fast and self-contained.
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name
# (modern Django reads MIDDLEWARE) -- confirm the Django versions targeted.
MIDDLEWARE_CLASSES = ("django.middleware.common.CommonMiddleware",)
SITE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Standard Django template engine with app template dirs enabled so the
# package's own templates are discoverable during tests.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.contrib.messages.context_processors.messages",
            ],
            "debug": True,
        },
    },
]
| 1.664063 | 2 |
lib/devops/simple_cache.py | jpotter/angel | 0 | 12763629 | <gh_stars>0
# This is a very light-weight persistent cache setup for devops use. This cache setup stores one file
# per key/value pair, typically under tmpfs (ram-backed / reset on reboot) so that data can be stored and accessed
# between runs, and without relying on any running services.
#
# Key/value pairs are expected to be simple types (strings, ints, or booleans). We're not pickling
# anything here specifically so that non-python processes can read the data, should we ever need to. We
# do, however, store the type info and cast back to those types, so that programming errors for failing
# to cast to ints don't cause unexpected bugs.
#
# This is primarily meant for monitoring-level data, where we can't rely on services. (If we can't rely
# on the filesystem, we have other problems...)
#
# It is not meant to be used for large numbers of keys or rapidly changing data.
import os
import random
import string
import sys
import time
def simple_cache_set(key, value, ttl_in_seconds=None):
    ''' Set the given key to the given value, optionally marking an expiry time. Give a value of None to clear a previous cache set.
    Returns 0 on success; a negative code (-1 bad key, -2 remove failed, -3 write failed) on error. '''
    f = _simple_cache_get_filename(key)
    if f is None:
        # Key could not be mapped to a cache filename (e.g. key was None).
        return -1
    if value is None:
        # A None value means "clear the cached entry", if one exists.
        if os.path.isfile(f):
            try:
                os.remove(f)
            except Exception as e:
                print >>sys.stderr, "Error: unable to remove simple cache file %s: %s" % (f, e)
                return -2
        return 0
    set_time = int(time.time())
    expiry = ''
    if ttl_in_seconds is not None:
        expiry = int(time.time() + ttl_in_seconds)
    # Record the value's type name so readers can cast it back (see
    # simple_cache_get); values are expected to be simple types.
    type_str = type(value).__name__
    try:
        # Write to a uniquely-named temp file, then rename into place; on the
        # same filesystem the rename replaces the file in a single step so
        # readers never see a partially-written entry.
        tmp_file = "%s-%s" % (f, str(random.random())[2:]) # Avoid race condition of partially-written file being read; avoid symlink overwrite scenario
        open(tmp_file, 'w').write("%s\n%s\n%s\n%s" % (set_time, expiry, type_str, value))
        os.rename(tmp_file, f)
    except Exception as e:
        print >>sys.stderr, "Error: unable to update simple cache file %s: %s" % (f, e)
        return -3
    return 0
def simple_cache_get(key, max_allowed_age_in_seconds=None, default=None, get_function=None, get_function_args=(), get_function_kwargs={}, get_function_ttl_in_seconds=None):
    ''' Get the given key from simple cache.
    If the key isn't set and get_function is defined, call get_function(); if get_function returns a value, store the key for future gets, and return it.
    If max_allowed_age_in_seconds is given, and the value was set before that time, default is returned. Note that this does not clear the cache value; future calls with longer durations will still return the stored value.
    If default is given, then it is returned when no value can be found; it is never stored.
    '''
    # (get_function_kwargs={} is a shared mutable default, but it is only ever
    # unpacked with **, never mutated, so it is harmless here.)
    if get_function is not None:
        # Recurse through without the function:
        value = simple_cache_get(key)
        if value is not None:
            return value
        # If we get here, then we have a function to get the value, and it is not currently set.
        value = get_function(*get_function_args, **get_function_kwargs)
        if value is None:
            return default
        simple_cache_set(key, value, ttl_in_seconds=get_function_ttl_in_seconds)
        return value
    f = _simple_cache_get_filename(key)
    if f is None:
        return default
    try:
        # Only trust cache files owned by the current user.
        if os.getuid() != os.stat(f).st_uid:
            print >>sys.stderr, "Error: simple cache file %s owner mismatch (%s/%s)." % (f, os.getuid(), os.stat(f).st_uid)
            return default
    except:
        # There's a race condition where checking if the file exists can work, but the cache can then timeout.
        # Don't bother checking if file exists before stating it.
        return default
    raw_data = None
    try:
        raw_data = open(f).read()
    except:
        # Likewise, race condition where the file might have just been deleted
        return default
    try:
        # File format (written by simple_cache_set):
        # set_time\nexpiry\ntype_name\nvalue
        set_time, expiry, type_str, value = raw_data.split('\n',3)
        if len(expiry):
            if time.time() > int(expiry):
                # Race condition of another process removing it; so remove; ignore errors; then check if it exists
                try:
                    os.remove(f)
                except Exception as e:
                    pass
                if os.path.isfile(f):
                    print >>sys.stderr, "Error: unable to remove expired simple cache file %s (maybe another process re-added it?)" % (f)
                return default
        if max_allowed_age_in_seconds is not None:
            current_age = time.time() - int(set_time)
            if current_age > max_allowed_age_in_seconds:
                return default
        # Cast the stored string back to its recorded type. Booleans need
        # special handling: bool("False") is True, so the eval() cast below
        # silently turned a stored False back into True.
        if type_str == 'bool':
            return value == 'True'
        return eval(type_str)(value)
    except Exception as e:
        # Unparseable entry: remove it so it doesn't poison future reads.
        try:
            os.remove(f)
            print >>sys.stderr, "Error: unable to parse simple cache file %s (%s); removing it." % (f, e)
        except Exception as e2:
            print >>sys.stderr, "Error: unable to parse simple cache file %s (%s); unable to remove it (%s)." % (f, e, e2)
        return default
def _simple_cache_get_filename(key):
    ''' Map a cache key to a per-user file path, or return None for a bad key.
    Prefers /dev/shm (RAM-backed, cleared on reboot), then $TMPDIR, then /tmp. '''
    if key is None:
        print >>sys.stderr, "Error: simple cache given 'None' key."
        return None
    base_dir = '/dev/shm'
    if not os.path.isdir(base_dir):
        if 'TMPDIR' in os.environ:
            base_dir = os.environ['TMPDIR']
        if not os.path.isdir(base_dir):
            base_dir = '/tmp'
    # Sanitize the key: lowercase, map '/' to '-', drop anything outside
    # [-_a-zA-Z0-9], and truncate to 64 chars to keep the filename safe.
    valid_chars = "-_%s%s" % (string.ascii_letters, string.digits)
    key = key.lower().replace('/','-')
    filename_safe_key = ''.join(c for c in key if c in valid_chars)
    # Filename is namespaced by uid so concurrent users never collide.
    return os.path.join(base_dir, 'angel-simplecache-1-%s-%s' % (os.getuid(), filename_safe_key[0:64]))
| 2.6875 | 3 |
app/app.py | ellie271/ErdosProject | 0 | 12763630 | import streamlit as st
import numpy as np
import pickle
from sklearn.tree import DecisionTreeClassifier
#model = DecisionTreeClassifier(max_depth=8)
# Pre-trained classifier serialized at training time; loaded once at startup.
model = pickle.load(open('model.pickle','rb'))
# --- Page header -----------------------------------------------------------
st.write("""
# CoverMyMeds - PA Approval Chances
""")
st.write("This project was done as part of the Erdos Data Science bootcamp Fall 2021. The data was provided by CoverMyMeds.")
st.header("User Information")
st.write("Please fill in the following information." )
# --- User inputs -----------------------------------------------------------
# NOTE(review): `bin` shadows the Python builtin of the same name; harmless in
# this flat script but worth renaming.
bin = st.radio("Select the BIN of Insurance payer: ", ("417380","417614","417740","999001"))
drug = st.radio("Select the drug that you want covered: ", ("A","B","C"))
tried_failed = st.radio("Have you tried and failed the generic alternative?", ("Yes","No"))
contraindication = st.radio("Do you have an associated contraindication for the medication requested (i.e. is there any reason you cannot take this drug)?",("Yes","No"));
correct_diagnosis = st.radio("Do you have the corrected diagnosis for the associated drug?",("Yes","No"));
# Find reject code:
# Each payer (BIN) maps each drug to a fixed claim-reject code; BIN 999001
# always rejects with code 76.
reject_code = 0;
if bin == "417380":
    if drug == "A":
        reject_code = 75;
    elif drug == "B":
        reject_code = 76;
    elif drug == "C":
        reject_code = 70;
elif bin == "417614":
    if drug == "A":
        reject_code = 70;
    elif drug == "B":
        reject_code = 75;
    elif drug == "C":
        reject_code = 76;
elif bin == "417740":
    if drug == "A":
        reject_code = 76;
    elif drug == "B":
        reject_code = 70;
    elif drug == "C":
        reject_code = 75;
elif bin == "999001":
    reject_code = 76;
#Set features
# One-hot / binary encode the answers in the exact column order the model
# was trained with.
d = {"Yes":1, "No":0} #Dictionary for Yes = 1, No = 0
cd = d[correct_diagnosis]
tf = d[tried_failed]
contra = d[contraindication]
drug_B = int(drug == "B")
drug_C = int(drug == "C")
bin_417614 = int(bin == "417614")
bin_417740 = int(bin == "417740")
bin_999001 = int(bin == "999001")
reject_code_75 = int(reject_code == 75)
reject_code_76 = int(reject_code == 76)
#Predict
pred = model.predict_proba([[cd,tf,contra,drug_B,drug_C,bin_417614,bin_417740,bin_999001, reject_code_75, reject_code_76]])
# If the user has not yet tried the generic, also predict the outcome as if
# they had (tried_failed forced to 1).
if tf == 0:
    pred1 = model.predict_proba([[cd,1,contra,drug_B,drug_C,bin_417614,bin_417740,bin_999001, reject_code_75, reject_code_76]])
st.header("Result")
st.write("""The chances of your PA being approved are: **{}**""".format(np.round(100*pred[0,1],3)), "%.")
if tf == 0:
    st.write("""In addition, if you first try the generic alternative but still need this drug, then the chances of your PA form being approved are: {}""".format(np.round(100*pred1[0,1],3)), "%.")
| 3.25 | 3 |
blogs/blog/db/post.py | caifeifei0329/blogs_study | 0 | 12763631 | import datetime
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from .base import Base
class Post(Base):
    """Blog post record (table ``post``), authored by one :class:`User`."""
    __tablename__ = "post"
    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String)
    content = Column(String)
    create_at = Column(DateTime, default=datetime.datetime.now)
    # Fix: `default` only fires on INSERT; add `onupdate` so the timestamp
    # actually refreshes whenever the row is updated.
    update_at = Column(DateTime, default=datetime.datetime.now,
                       onupdate=datetime.datetime.now)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship("User")
class User(Base):
    """Application user (table ``user``)."""
    __tablename__ = "user"
    id = Column(Integer, primary_key=True, autoincrement=True)
    name= Column(String)
    # NOTE(review): the password appears to be stored as-is; confirm it is
    # hashed before being written to this column.
    password= Column(String)
    # All posts authored by this user.
    post=relationship("Post")
| 3.09375 | 3 |
SpaceHabitRPG/Models/BaseModel.py | joelliusp/SpaceHabit | 0 | 12763632 | <gh_stars>0
from AllDBFields import BaseFields
class BaseModel(object):
    """Common base for DB-backed models.

    Keeps the raw record in ``self.dict`` and tracks pending writes in
    ``self._changes``.
    """

    @classmethod
    def get_dbFields(cls):
        """Return the field-name constants used by this model."""
        return BaseFields

    def __init__(self):
        self._changes = {}
        self.dict = {}

    def get_pk(self):
        """Return the record's primary key, or None when it is unset."""
        pk_key = self.get_dbFields().PK_KEY
        if pk_key in self.dict:
            return self.dict[pk_key]
        return None

    def set_common_property(self, key, value):
        """
        Record *value* under *key* in both the model dict and the pending
        change set. (Centralizes the two lines every setter was repeating.)
        """
        self._changes[key] = value
        self.dict[key] = value
apps/paddlefold/alphafold_paddle/relax/utils_test.py | kanz76/PaddleHelix | 1 | 12763633 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
import os
from absl.testing import absltest
from alphafold_paddle.common import protein
from alphafold_paddle.relax import utils
import numpy as np
# Internal import (7716).
class UtilsTest(absltest.TestCase):
  """Tests for relax.utils.overwrite_b_factors."""
  def test_overwrite_b_factors(self):
    # NOTE(review): the test data path says 'alphafold/relax/testdata/' while
    # this module imports from alphafold_paddle -- confirm the srcdir layout.
    testdir = os.path.join(
        absltest.get_default_test_srcdir(),
        'alphafold/relax/testdata/'
        'multiple_disulfides_target.pdb')
    with open(testdir) as f:
      test_pdb = f.read()
    n_residues = 191
    # One B-factor row per residue, repeated across the 37 atom-type columns.
    bfactors = np.stack([np.arange(0, n_residues)] * 37, axis=-1)
    output_pdb = utils.overwrite_b_factors(test_pdb, bfactors)
    # Check that the atom lines are unchanged apart from the B-factors
    # (PDB columns 61-66 hold the B-factor field).
    atom_lines_original = [l for l in test_pdb.split('\n') if l[:4] == ('ATOM')]
    atom_lines_new = [l for l in output_pdb.split('\n') if l[:4] == ('ATOM')]
    for line_original, line_new in zip(atom_lines_original, atom_lines_new):
      self.assertEqual(line_original[:60].strip(), line_new[:60].strip())
      self.assertEqual(line_original[66:].strip(), line_new[66:].strip())
    # Check B-factors are correctly set for all atoms present.
    as_protein = protein.from_pdb_string(output_pdb)
    np.testing.assert_almost_equal(
        np.where(as_protein.atom_mask > 0, as_protein.b_factors, 0),
        np.where(as_protein.atom_mask > 0, bfactors, 0))
if __name__ == '__main__':
  absltest.main()
| 2.109375 | 2 |
backtrader/backtrader/feeds/chainer.py | harshabakku/live-back-testing-trader | 1 | 12763634 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime
import backtrader as bt
from backtrader.utils.py3 import range
class MetaChainer(bt.DataBase.__class__):
    """Metaclass for Chainer: hooks object creation to inherit the
    timeframe/compression of the first chained data feed."""
    def __init__(cls, name, bases, dct):
        '''Class has already been created ... register'''
        # Initialize the class
        super(MetaChainer, cls).__init__(name, bases, dct)
    def donew(cls, *args, **kwargs):
        '''Intercept const. to copy timeframe/compression from 1st data'''
        # Create the object and set the params in place
        _obj, args, kwargs = super(MetaChainer, cls).donew(*args, **kwargs)
        if args:
            # Mirror the first feed's timeframe/compression onto the chain.
            _obj.p.timeframe = args[0]._timeframe
            _obj.p.compression = args[0]._compression
        return _obj, args, kwargs
class Chainer(bt.with_metaclass(MetaChainer, bt.DataBase)):
    '''Class that chains datas: delivers bars from each feed in turn,
    moving to the next feed when the current one is exhausted.'''
    def islive(self):
        '''Returns ``True`` to notify ``Cerebro`` that preloading and runonce
        should be deactivated'''
        return True
    def __init__(self, *args):
        # The feeds to chain, in delivery order.
        self._args = args
    def start(self):
        super(Chainer, self).start()
        for d in self._args:
            d.setenvironment(self._env)
            d._start()
        # put the references in a separate list to have pops
        self._ds = list(self._args)
        self._d = self._ds.pop(0) if self._ds else None
        # Last delivered datetime; used to enforce monotonic delivery.
        self._lastdt = datetime.min
    def stop(self):
        super(Chainer, self).stop()
        for d in self._args:
            d.stop()
    def get_notifications(self):
        # Only the currently-active feed can have pending notifications.
        return [] if self._d is None else self._d.get_notifications()
    def _gettz(self):
        '''To be overridden by subclasses which may auto-calculate the
        timezone'''
        if self._args:
            return self._args[0]._gettz()
        return bt.utils.date.Localizer(self.p.tz)
    def _load(self):
        # Pull the next bar from the active feed, advancing through the
        # chained feeds as each one runs out.
        while self._d is not None:
            if not self._d.next():  # no values from current data source
                self._d = self._ds.pop(0) if self._ds else None
                continue
            # Cannot deliver a date equal or less than an already delivered one
            dt = self._d.datetime.datetime()
            if dt <= self._lastdt:
                continue
            self._lastdt = dt
            # Copy every line value of the active feed into this feed's lines.
            for i in range(self._d.size()):
                self.lines[i][0] = self._d.lines[i][0]
            return True
        # Out of the loop -> self._d is None, no data feed to return from
        return False
| 2.0625 | 2 |
tests/test_registration.py | NLeSC/python-pcl | 10 | 12763635 | from __future__ import print_function
import numpy as np
from numpy import cos, sin
from numpy.testing import assert_equal
import unittest
import pcl
from pcl.registration import icp, gicp, icp_nl, ia_ransac
# Expected 4x4 rigid transform registering bun0.pcd onto bun4.pcd; used as
# the reference result in check_algo below.
bun0Tobun4 = [[0.85250509, -0.03745676, -0.52137518, 0.04118973],
              [0.03552843, 0.99927479, -0.01369729, 0.00103067],
              [0.52151012, -0.00684663, 0.8532176, 0.03994245],
              [0., 0., 0., 1.]]
class TestICP(unittest.TestCase):
    """Registration tests (ICP variants) against the Stanford bunny scans."""
    def setUpRandom(self):
        # Check if ICP can find a mild rotation.
        # NOTE(review): rot_z mixes theta[2] and theta[1] in its sin/cos
        # entries, so it is not a pure rotation about z -- looks like a typo
        # (theta[1] should be theta[2]). Unused by default since setUp()
        # calls setUpBunny().
        theta = [-.031, .4, .59]
        rot_x = [[1, 0, 0],
                 [0, cos(theta[0]), -sin(theta[0])],
                 [0, sin(theta[0]), cos(theta[0])]]
        rot_y = [[cos(theta[1]), 0, sin(theta[1])],
                 [0, 1, 0],
                 [-sin(theta[1]), 0, cos(theta[1])]]
        rot_z = [[cos(theta[2]), -sin(theta[1]), 0],
                 [sin(theta[2]), cos(theta[1]), 0],
                 [0, 0, 1]]
        transform = np.dot(rot_x, np.dot(rot_y, rot_z))
        # print("---------")
        # print("Rotation: ")
        # print(transform[0:3,0:3])
        # print("Translation: ", transform[3, 0:3])
        # print("---------")
        random_cloud = np.random.RandomState(42).randn(900, 3)
        self.source = pcl.PointCloud(random_cloud.astype(np.float32))
        a = np.dot(random_cloud, transform).astype(np.float32)
        self.target = pcl.PointCloud(a)
    def setUpBunny(self):
        # Two partially-overlapping bunny scans as source/target clouds.
        self.source = pcl.PointCloud()
        self.source.from_file("tests/bun0.pcd")
        self.target = pcl.PointCloud()
        self.target.from_file("tests/bun4.pcd")
    def setUp(self):
        self.setUpBunny()
    def check_algo(self, algo, max_iter=1000, **kwargs):
        # Run the registration algorithm and verify the recovered transform
        # matches the known bun0->bun4 transform within tolerance.
        converged, transf, estimate, fitness = \
            algo(self.source, self.target, max_iter=max_iter, **kwargs)
        self.assertTrue(isinstance(transf, np.ndarray))
        self.assertEqual(transf.shape, (4, 4))
        np.testing.assert_allclose(bun0Tobun4, transf, 0, 0.1)
        # Bottom row of a homogeneous rigid transform must be [0, 0, 0, 1].
        assert_equal(transf[3], [0, 0, 0, 1])
        # XXX I think I misunderstand fitness, it's not equal to the following
        # MSS.
        # mss = (np.linalg.norm(estimate.to_array()
        #                       - self.source.to_array(), axis=1) ** 2).mean()
        # self.assertLess(mss, 1)
        # print("------", algo)
        # print("Converged: ", converged, "Estimate: ", estimate,
        #       "Fitness: ", fitness)
        # print("Rotation: ")
        # print(transf[0:3,0:3])
        # print("Translation: ", transf[3, 0:3])
        # print("---------")
    def testGICP(self):
        self.check_algo(gicp)
    def testICP_NL(self):
        self.check_algo(icp_nl)
    def testIA_RANSAC(self):
        # reducing radius makes this test fail
        # reducing the max_iter to 1000 makes the test fail
        self.check_algo(ia_ransac, radius=0.5, minSampleDistance=0.01,
                        maxCorrespondenceDistance=0.5, max_iter=10000)
    def testICP(self):
        self.check_algo(icp)
        # Different iteration caps / stopping criteria must yield different
        # intermediate transforms on this hard registration.
        transf1 = icp(self.source, self.target, max_iter=1)[1]
        transf2 = icp(self.source, self.target, max_iter=2)[1]
        self.assertFalse(np.allclose(transf1, transf2, 0, 0.1),
                         "First and second transformation should be unequal"
                         " in this complicated registration.")
        transf1 = icp(self.source, self.target, transformationEpsilon=0)[1]
        transf2 = icp(self.source, self.target, transformationEpsilon=1)[1]
        self.assertFalse(np.allclose(transf1, transf2, 0, 0.1),
                         "Transformations should be unequal"
                         " with different stopping criteria.")
        transf1 = icp(self.source, self.target, euclideanFitnessEpsilon=0)[1]
        transf2 = icp(self.source, self.target, euclideanFitnessEpsilon=1)[1]
        self.assertFalse(np.allclose(transf1, transf2, 0, 0.1),
                         "Transformations should be unequal with different"
                         " stopping criteria.")
| 2.125 | 2 |
setup.py | napetrov/dpctl | 0 | 12763636 | <gh_stars>0
##===---------- setup.py - dpctl.ocldrv interface -----*- Python -*-----===##
##
## Data Parallel Control Library (dpCtl)
##
## Copyright 2020 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##===----------------------------------------------------------------------===##
###
### \file
### This file builds the dpctl and dpctl.ocldrv extension modules.
##===----------------------------------------------------------------------===##
import os
import os.path
import sys
import versioneer
from setuptools import setup, Extension, find_packages
from Cython.Build import cythonize
import numpy as np
# Packages required both to build (setup_requires) and run (install_requires).
requirements = [
    "cffi>=1.0.0",
    "cython",
]
# Platform detection; exactly one flag ends up True or the assert fires.
IS_WIN = False
IS_MAC = False
IS_LIN = False
if "linux" in sys.platform:
    IS_LIN = True
elif sys.platform == "darwin":
    IS_MAC = True
elif sys.platform in ["win32", "cygwin"]:
    IS_WIN = True
else:
    assert False, sys.platform + " not supported"
# NOTE(review): these environment variables are read unconditionally, so the
# build fails with KeyError when any is unset -- including ONEAPI_ROOT on
# Linux/macOS, where sycl_lib is never used. Confirm that is intended.
dppl_sycl_interface_lib = os.environ["DPPL_SYCL_INTERFACE_LIBDIR"]
dppl_sycl_interface_include = os.environ["DPPL_SYCL_INTERFACE_INCLDIR"]
# NOTE(review): Windows-only library path built with raw backslashes; "\l"
# and "\w" are not valid escapes so they stay literal, but os.path.join
# would be safer and clearer.
sycl_lib = os.environ["ONEAPI_ROOT"] + "\compiler\latest\windows\lib"
def get_sdl_cflags():
    """Return the SDL hardening compile flags for the current platform."""
    if IS_WIN:
        return []
    elif IS_LIN or IS_MAC:
        # Stack protection, position-independent code, fortified libc and
        # format-string warnings.
        return [
            "-fstack-protector",
            "-fPIC",
            "-D_FORTIFY_SOURCE=2",
            "-Wformat",
            "-Wformat-security",
        ]
def get_sdl_ldflags():
    """Return the SDL hardening link flags for the current platform."""
    if IS_WIN:
        return ["/NXCompat", "/DynamicBase"]
    elif IS_MAC:
        return []
    elif IS_LIN:
        # Non-executable stack plus full RELRO.
        return [
            "-Wl,-z,noexecstack,-z,relro,-z,now",
        ]
def get_other_cxxflags():
    """Return extra C++ compile flags (optimization, SYCL, C++17)."""
    if IS_WIN:
        # FIXME: These are specific to MSVC and we should first make sure
        # what compiler we are using.
        return ["/Ox", "/std:c++17"]
    elif IS_MAC:
        return []
    elif IS_LIN:
        return ["-O3", "-fsycl", "-std=c++17"]
def extensions():
    """Build and cythonize the dpctl extension modules for this platform."""
    # Security flags
    eca = get_sdl_cflags()
    ela = get_sdl_ldflags()
    libs = []
    librarys = []
    # Libraries to link against, per platform.
    if IS_LIN:
        libs += ["rt", "DPPLSyclInterface"]
    elif IS_MAC:
        pass
    elif IS_WIN:
        libs += ["DPPLSyclInterface", "sycl"]
    # Library search paths (built from env vars read at module import).
    if IS_LIN:
        librarys = [dppl_sycl_interface_lib]
    elif IS_WIN:
        librarys = [dppl_sycl_interface_lib, sycl_lib]
    elif IS_MAC:
        librarys = [dppl_sycl_interface_lib]
    # $ORIGIN makes the extension look for shared libs next to itself.
    if IS_LIN or IS_MAC:
        runtime_library_dirs = ["$ORIGIN"]
    elif IS_WIN:
        runtime_library_dirs = []
    # Keyword arguments shared by every extension below.
    extension_args = {
        "depends": [
            dppl_sycl_interface_include,
        ],
        "include_dirs": [np.get_include(), dppl_sycl_interface_include],
        "extra_compile_args": eca + get_other_cxxflags(),
        "extra_link_args": ela,
        "libraries": libs,
        "library_dirs": librarys,
        "runtime_library_dirs": runtime_library_dirs,
        "language": "c++",
    }
    extensions = [
        Extension(
            "dpctl._sycl_core",
            [
                os.path.join("dpctl", "sycl_core.pyx"),
            ],
            **extension_args
        ),
        Extension(
            "dpctl._memory",
            [
                os.path.join("dpctl", "_memory.pyx"),
            ],
            **extension_args
        ),
    ]
    exts = cythonize(extensions)
    return exts
# Package metadata and build configuration; version is managed by versioneer.
setup(
    name="dpctl",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="A lightweight Python wrapper for a subset of OpenCL and SYCL.",
    license="Apache 2.0",
    author="<NAME>",
    url="https://github.com/IntelPython/dpCtl",
    packages=find_packages(include=["*"]),
    include_package_data=True,
    ext_modules=extensions(),
    setup_requires=requirements,
    cffi_modules=["./dpctl/opencl_core.py:ffi"],
    install_requires=requirements,
    keywords="dpctl",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
| 1.484375 | 1 |
native/native/websocket.py | Andrew-Chen-Wang/django-3.0-private-messaging | 1 | 12763637 | from public.socket import handle_connect, handle_disconnect, handle_receive
async def websocket_application(scope, receive, send):
    """ASGI websocket app: dispatch each incoming event to its handler.

    Handlers implement the business logic and live in public/socket.py.
    The loop ends when the client disconnects.
    """
    while True:
        event = await receive()
        event_type = event['type']
        if event_type == 'websocket.connect':
            await handle_connect(event, scope, receive, send)
        elif event_type == 'websocket.disconnect':
            await handle_disconnect(event, scope, receive, send)
            break
        elif event_type == 'websocket.receive':
            await handle_receive(event, scope, receive, send)
| 2.78125 | 3 |
queue_fair_adapter/queue_fair_django_service.py | Queue-Fair/python | 0 | 12763638 | from queue_fair_adapter.queue_fair_service import QueueFairService
class QueueFairDjangoService(QueueFairService):
    """Django adapter for QueueFairService.

    Bridges cookie access, response headers and redirects onto Django's
    request/response objects.
    """

    def __init__(self, request, response):
        self.request = request
        self.response = response
        self.isSecure = request.is_secure()

    def setCookie(self, name, value, lifetimeSeconds, domain):
        # Positional args: name, value, max_age, expires, path, domain,
        # secure, httponly, samesite.
        samesite = "None" if self.isSecure else None
        self.response.set_cookie(name, value, lifetimeSeconds, None, "/",
                                 domain, self.isSecure, False, samesite)

    def redirect(self, location):
        # 302 plus a Location header sends the visitor to the queue page.
        self.response.status_code = 302
        self.addHeader("Location", location)

    def getCookie(self, name):
        cookies = self.request.COOKIES
        return cookies[name] if name in cookies else ""

    def addHeader(self, name, value):
        self.response.headers[name] = value
| 2.484375 | 2 |
tick/robust/tests/model_huber_test.py | sumau/tick | 411 | 12763639 | # License: BSD 3 clause
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from tick.robust import ModelHuber
from tick.base_model.tests.generalized_linear_model import TestGLM
from tick.linear_model import SimuLinReg
class Test(TestGLM):
    def test_ModelHuber(self):
        """...Numerical consistency check of loss and gradient for Huber model
        """
        np.random.seed(12)
        n_samples, n_features = 5000, 10
        w0 = np.random.randn(n_features)
        c0 = np.random.randn()
        # First check with intercept
        X, y = SimuLinReg(w0, c0, n_samples=n_samples,
                          verbose=False).simulate()
        X_spars = csr_matrix(X)
        model = ModelHuber(fit_intercept=True, threshold=1.3).fit(X, y)
        model_spars = ModelHuber(fit_intercept=True, threshold=1.3).fit(
            X_spars, y)
        # Dense and sparse fits must agree on loss/gradient.
        self.run_test_for_glm(model, model_spars)
        self._test_glm_intercept_vs_hardcoded_intercept(model)
        # Then check without intercept
        X, y = SimuLinReg(w0, None, n_samples=n_samples, verbose=False,
                          seed=2038).simulate()
        X_spars = csr_matrix(X)
        model = ModelHuber(fit_intercept=False).fit(X, y)
        model_spars = ModelHuber(fit_intercept=False).fit(X_spars, y)
        self.run_test_for_glm(model, model_spars)
        # Test for the Lipschitz constants without intercept
        # (values pinned from a seeded run; seed changes will break them).
        self.assertAlmostEqual(model.get_lip_best(), 2.6873683857125981)
        self.assertAlmostEqual(model.get_lip_mean(), 9.95845726788432)
        self.assertAlmostEqual(model.get_lip_max(), 54.82616964855237)
        self.assertAlmostEqual(model_spars.get_lip_mean(),
                               model.get_lip_mean())
        self.assertAlmostEqual(model_spars.get_lip_max(), model.get_lip_max())
        # Test for the Lipschitz constants with intercept
        model = ModelHuber(fit_intercept=True).fit(X, y)
        model_spars = ModelHuber(fit_intercept=True).fit(X_spars, y)
        self.assertAlmostEqual(model.get_lip_best(), 2.687568385712598)
        self.assertAlmostEqual(model.get_lip_mean(), 10.958457267884327)
        self.assertAlmostEqual(model.get_lip_max(), 55.82616964855237)
        self.assertAlmostEqual(model_spars.get_lip_mean(),
                               model.get_lip_mean())
        self.assertAlmostEqual(model_spars.get_lip_max(), model.get_lip_max())
    def test_ModelHuber_threshold(self):
        """The threshold parameter must propagate to the C++ model and
        reject non-positive values."""
        np.random.seed(12)
        n_samples, n_features = 5000, 10
        w0 = np.random.randn(n_features)
        c0 = np.random.randn()
        # First check with intercept
        X, y = SimuLinReg(w0, c0, n_samples=n_samples,
                          verbose=False).simulate()
        model = ModelHuber(threshold=1.541).fit(X, y)
        self.assertEqual(model._model.get_threshold(), 1.541)
        model.threshold = 3.14
        self.assertEqual(model._model.get_threshold(), 3.14)
        msg = '^threshold must be > 0$'
        with self.assertRaisesRegex(RuntimeError, msg):
            model = ModelHuber(threshold=-1).fit(X, y)
        with self.assertRaisesRegex(RuntimeError, msg):
            model.threshold = 0.
if __name__ == '__main__':
    unittest.main()
| 2.234375 | 2 |
ctapipe/calib/camera/tests/test_calibrator.py | pgrespan/ctapipe | 0 | 12763640 | """
Tests for CameraCalibrator and related functions
"""
import numpy as np
import pytest
from scipy.stats import norm
from traitlets.config.configurable import Config
from astropy import units as u
from ctapipe.calib.camera.calibrator import CameraCalibrator
from ctapipe.image.extractor import LocalPeakWindowSum, FullWaveformSum
from ctapipe.instrument import CameraGeometry
from ctapipe.containers import DataContainer
@pytest.fixture(scope="function")
def subarray(example_event):
    # Convenience fixture: the subarray description of the example event.
    return example_event.inst.subarray
def test_camera_calibrator(example_event, subarray):
    """Calibrating an event must populate dl1 image and pulse_time arrays."""
    telid = list(example_event.r0.tel)[0]
    calibrator = CameraCalibrator(subarray=subarray)
    calibrator(example_event)
    image = example_event.dl1.tel[telid].image
    pulse_time = example_event.dl1.tel[telid].pulse_time
    assert image is not None
    assert pulse_time is not None
    # 1764 pixels for the first telescope of the example event.
    assert image.shape == (1764,)
    assert pulse_time.shape == (1764,)
def test_manual_extractor(subarray):
    """An explicitly-passed image extractor must be used as-is."""
    calibrator = CameraCalibrator(
        subarray=subarray,
        image_extractor=LocalPeakWindowSum(subarray=subarray)
    )
    assert isinstance(calibrator.image_extractor, LocalPeakWindowSum)
def test_config(subarray):
    """Traitlets config must reach the extractor's per-telescope parameters."""
    window_shift = 3
    window_width = 9
    config = Config(
        {
            "LocalPeakWindowSum": {
                "window_shift": window_shift,
                "window_width": window_width,
            }
        }
    )
    calibrator = CameraCalibrator(
        subarray=subarray,
        image_extractor=LocalPeakWindowSum(subarray=subarray, config=config),
        config=config
    )
    # tel[None] is the default (all-telescope) value of the trait.
    assert calibrator.image_extractor.window_shift.tel[None] == window_shift
    assert calibrator.image_extractor.window_width.tel[None] == window_width
def test_check_r1_empty(example_event, subarray):
    """A missing r1 waveform must warn and leave dl0 unfilled; dl0-level
    data must still flow through to dl1."""
    calibrator = CameraCalibrator(subarray=subarray)
    telid = list(example_event.r0.tel)[0]
    waveform = example_event.r1.tel[telid].waveform.copy()
    with pytest.warns(UserWarning):
        example_event.r1.tel[telid].waveform = None
        calibrator._calibrate_dl0(example_event, telid)
        assert example_event.dl0.tel[telid].waveform is None
    assert calibrator._check_r1_empty(None) is True
    assert calibrator._check_r1_empty(waveform) is False
    # With dl0 pre-filled, calibration should proceed from dl0 to dl1.
    calibrator = CameraCalibrator(
        subarray=subarray,
        image_extractor=FullWaveformSum(subarray=subarray)
    )
    event = DataContainer()
    event.dl0.tel[telid].waveform = np.full((2048, 128), 2)
    with pytest.warns(UserWarning):
        calibrator(event)
    assert (event.dl0.tel[telid].waveform == 2).all()
    # FullWaveformSum over 128 constant samples of 2 -> 256 per pixel.
    assert (event.dl1.tel[telid].image == 2 * 128).all()
def test_check_dl0_empty(example_event, subarray):
    """A missing dl0 waveform must warn and leave dl1 unfilled; a pre-filled
    dl1 image must pass through unchanged."""
    calibrator = CameraCalibrator(subarray=subarray)
    telid = list(example_event.r0.tel)[0]
    calibrator._calibrate_dl0(example_event, telid)
    waveform = example_event.dl0.tel[telid].waveform.copy()
    with pytest.warns(UserWarning):
        example_event.dl0.tel[telid].waveform = None
        calibrator._calibrate_dl1(example_event, telid)
        assert example_event.dl1.tel[telid].image is None
    assert calibrator._check_dl0_empty(None) is True
    assert calibrator._check_dl0_empty(waveform) is False
    calibrator = CameraCalibrator(subarray=subarray)
    event = DataContainer()
    event.dl1.tel[telid].image = np.full(2048, 2)
    with pytest.warns(UserWarning):
        calibrator(event)
    assert (event.dl1.tel[telid].image == 2).all()
def test_dl1_charge_calib(subarray):
    """Build synthetic Gaussian pulses with known pedestal/absolute/relative
    coefficients and check the dl1 charge calibration recovers them."""
    camera = CameraGeometry.from_name("CHEC")
    n_pixels = camera.n_pixels
    n_samples = 96
    mid = n_samples // 2
    pulse_sigma = 6
    random = np.random.RandomState(1)
    x = np.arange(n_samples)
    # Randomize times and create pulses
    time_offset = random.uniform(mid - 10, mid + 10, n_pixels)[:, np.newaxis]
    y = norm.pdf(x, time_offset, pulse_sigma)
    # Define absolute calibration coefficients
    absolute = random.uniform(100, 1000, n_pixels)
    y *= absolute[:, np.newaxis]
    # Define relative coefficients
    relative = random.normal(1, 0.01, n_pixels)
    y /= relative[:, np.newaxis]
    # Define pedestal
    pedestal = random.uniform(-4, 4, n_pixels)
    y += pedestal[:, np.newaxis]
    event = DataContainer()
    telid = list(subarray.tel.keys())[0]
    event.dl0.tel[telid].waveform = y
    # Test default
    # Without calibration coefficients the image is just the waveform sum.
    calibrator = CameraCalibrator(
        subarray=subarray,
        image_extractor=FullWaveformSum(subarray=subarray)
    )
    calibrator(event)
    np.testing.assert_allclose(event.dl1.tel[telid].image, y.sum(1))
    # Provide the true coefficients; a unit-area normalized pulse should then
    # calibrate to exactly 1 p.e. per pixel.
    event.calibration.tel[telid].dl1.time_shift = time_offset
    event.calibration.tel[telid].dl1.pedestal_offset = pedestal * n_samples
    event.calibration.tel[telid].dl1.absolute_factor = absolute
    event.calibration.tel[telid].dl1.relative_factor = relative
    # Test without need for timing corrections
    calibrator = CameraCalibrator(
        subarray=subarray,
        image_extractor=FullWaveformSum(subarray=subarray)
    )
    calibrator(event)
    np.testing.assert_allclose(event.dl1.tel[telid].image, 1)
    # TODO: Test with timing corrections
| 2.109375 | 2 |
mp3sum/util.py | okdana/mp3crc | 6 | 12763641 | # -*- coding: utf-8 -*-
"""
Utility functions.
"""
import crcmod.predefined
"""
Computes the CRC-16 check-sum of a string.
"""
crc16 = crcmod.predefined.mkCrcFun('crc-16')
def unpad_integer(integer, bits=7):
    """
    Decodes a bit-padded ('synchsafe') integer such as the one used for ID3v2
    tag sizes, where each 8-bit byte carries only its low *bits* bits.

    @param int integer
        The padded integer value to decode.  (Previously documented as a
        bytearray, but the implementation operates on a plain integer.)

    @param int bits
        (optional) The number of significant (non-padding) bits per byte.
        The default is 7 (as used by ID3v2).

    @return int
        The unpadded integer.
    """
    mask = (1 << bits) - 1
    result = 0
    shift = 0
    # Consume the input one 8-bit byte at a time (least-significant first),
    # keeping only the low `bits` bits of each and repacking them contiguously.
    # (Also avoids the previous shadowing of the builtin name `bytes`.)
    while integer:
        result |= (integer & mask) << shift
        integer >>= 8
        shift += bits
    return result
def format_offset(offset):
    """
    Formats an integer file offset.

    @param int offset
        The integer offset to format.

    @return str
        The formatted offset ('None' for missing/negative offsets).
    """
    invalid = offset is None or offset < 0
    return 'None' if invalid else "0x%08x (%i)" % (offset, offset)
| 3.5 | 4 |
YoutubeImg.py | Chromeina/My_Python_Based_Tools | 0 | 12763642 | import urllib.request
from os import rename, mkdir

# Ask the user for a YouTube video URL.
web_url = input("請輸入Youtube網址: ")
# web_url = input("Enter Youtube video url: ")

# NOTE(review): assumes the canonical 'https://www.youtube.com/watch?v='
# prefix (32 characters); short youtu.be links or extra query parameters
# will break this slice — confirm the expected input form.
video_link = web_url[32:]

# Log the thumbnail URL, then download it.  The URL itself must not contain
# the newline; the newline is only a record separator in the log file.
# (Previously the trailing "\n" was part of img_link and was passed to
# urlretrieve as well.)
img_link = "https://img.youtube.com/vi/" + video_link + "/maxresdefault.jpg"
img_file = open("ytimg.txt", "a", encoding="utf-8")
img_file.write(img_link + "\n")
img_file.close()

# Make sure the output directory exists (ignore it if it already does).
try:
    mkdir("Image")
except FileExistsError:
    pass

# Download the thumbnail and rename it after the video id.
urllib.request.urlretrieve(img_link, "Image/maxresdefault.jpg")
rename('Image/maxresdefault.jpg', 'Image/' + video_link + '.jpg')
| 3.234375 | 3 |
tests/cases/files.py | HappyEinara/wingline | 0 | 12763643 | """Test cases."""
from wingline.files import containers, formats
def case_dynamodb_jsonl_gz(data_dir):
    """Gzipped, DynamoDb-serialized JsonLines."""
    # Checksum obtained via:
    #   b2sum --length 64 --binary examples/data/dynamodb-tv-casts.jl.gz
    #   c8e2e027a73751df *examples/data/dynamodb-tv-casts.jl.gz
    path = data_dir / "dynamodb-tv-casts.jl.gz"
    checksum = "c8e2e027a73751df"
    record_count = 85
    return (path, checksum, containers.Gzip, formats.JsonLines, record_count)
| 2.046875 | 2 |
solutions/061_solution_02.py | UFResearchComputing/py4ai | 0 | 12763644 | <filename>solutions/061_solution_02.py
# 1. We can check out the first five rows of `americas` by executing
# `americas.head()` (allowing us to view the head of the DataFrame). We can
# specify the number of rows we wish to see by specifying the parameter `n`
# in our call to `americas.head()`. To view the first three rows, execute:
americas.head(n=3)
# The output is then
#            continent  gdpPercap_1952  gdpPercap_1957  gdpPercap_1962  \
# country
# Argentina   Americas     5911.315053     6856.856212     7133.166023
# Bolivia     Americas     2677.326347     2127.686326     2180.972546
# Brazil      Americas     2108.944355     2487.365989     3336.585802
#            gdpPercap_1967  gdpPercap_1972  gdpPercap_1977  gdpPercap_1982  \
# country
# Argentina     8052.953021     9443.038526    10079.026740     8997.897412
# Bolivia       2586.886053     2980.331339     3548.097832     3156.510452
# Brazil        3429.864357     4985.711467     6660.118654     7030.835878
#            gdpPercap_1987  gdpPercap_1992  gdpPercap_1997  gdpPercap_2002  \
# country
# Argentina     9139.671389     9308.418710    10967.281950     8797.640716
# Bolivia       2753.691490     2961.699694     3326.143191     3413.262690
# Brazil        7807.095818     6950.283021     7957.980824     8131.212843
#            gdpPercap_2007
# country
# Argentina    12779.379640
# Bolivia       3822.137084
# Brazil        9065.800825
# 2. To check out the last three rows of `americas`, we would use the command,
# `americas.tail(n=3)`, analogous to `head()` used above. However, here we want
# to look at the last three columns so we need to change our view and then use
# `tail()`. To do so, we create a new DataFrame in which rows and columns are
# switched
americas_flipped = americas.T
# We can then view the last three columns of `americas` by viewing the last
# three rows of `americas_flipped`:
americas_flipped.tail(n = 3)
# The output is then:
# country         Argentina  Bolivia   Brazil   Canada    Chile Colombia  \
# gdpPercap_1997    10967.3  3326.14  7957.98  28954.9  10118.1  6117.36
# gdpPercap_2002    8797.64  3413.26  8131.21    33329  10778.8  5755.26
# gdpPercap_2007    12779.4  3822.14   9065.8  36319.2  13171.6  7006.58
# country        Costa Rica     Cuba Dominican Republic  Ecuador   ...    \
# gdpPercap_1997    6677.05  5431.99             3614.1  7429.46   ...
# gdpPercap_2002    7723.45  6340.65            4563.81  5773.04   ...
# gdpPercap_2007    9645.06   8948.1            6025.37  6873.26   ...
# country          Mexico Nicaragua   Panama Paraguay     Peru Puerto Rico  \
# gdpPercap_1997   9767.3   2253.02  7113.69   4247.4  5838.35     16999.4
# gdpPercap_2002  10742.4   2474.55  7356.03  3783.67  5909.02     18855.6
# gdpPercap_2007  11977.6   2749.32  9809.19  4172.84  7408.91     19328.7
# country        Trinidad and Tobago United States  Uruguay Venezuela
# gdpPercap_1997             8792.57       35767.4  9230.24   10165.5
# gdpPercap_2002             11460.6       39097.1     7727   8605.05
# gdpPercap_2007             18008.5       42951.7  10611.5   11415.8
# Note: we could have done the above in a single line of code by 'chaining' the commands:
americas.T.tail(n=3)
| 4.3125 | 4 |
orchard/configuration/__init__.py | BMeu/Orchard | 2 | 12763645 | # -*- coding: utf-8 -*-
"""
A collection of default configurations for certain operation modes of |projectname|.
"""
from .basedir import basedir
from .default import Default
from .development import Development
from .production import Production
from .testing import Testing
__all__ = ['basedir', 'Default', 'Development', 'Production', 'Testing']
| 1.195313 | 1 |
tests/test_mr_presso.py | adriaan-vd-graaf/genome_integration | 13 | 12763646 | <filename>tests/test_mr_presso.py
from genome_integration import causal_inference
def test_mr_presso_with_reference_implementation():
    """
    This test tests the reference implementation of MR presso, found on github:
    https://github.com/rondolab/MR-PRESSO

    It loads in the dataset located in the data folder, and then does MR presso
    on the two exposures from this dataset, on the same outcome in this dataset.

    The functions used for this are the following (adapted from reference implementation):
    mr_presso(BetaOutcome = "Y_effect", BetaExposure = "E1_effect", SdOutcome = "Y_se", SdExposure = "E1_se", OUTLIERtest = TRUE, DISTORTIONtest = TRUE, data = SummaryStats, NbDistribution = 1000, SignifThreshold = 0.2)
    mr_presso(BetaOutcome = "Y_effect", BetaExposure = "E2_effect", SdOutcome = "Y_se", SdExposure = "E2_se", OUTLIERtest = TRUE, DISTORTIONtest = TRUE, data = SummaryStats, NbDistribution = 1000, SignifThreshold = 0.2)

    :return:
    """
    # Reference values from the R implementation.  The results are fluid, as it
    # uses a permutation scheme, but they should be very close to these.
    mr_presso_reference_ex1 = (0.5014829, 0.01047948)
    mr_presso_reference_ex2 = (0.8613234, 0.02143151)

    mr_presso_object_ex1 = causal_inference.MendelianRandomization()
    mr_presso_object_ex2 = causal_inference.MendelianRandomization()

    # Resolve the test data file relative to this test module.
    resource_path = '/'.join(('test_resources', 'mr_presso_data.txt'))
    if len(__file__.split("/")) > 1:
        mr_presso_file = "{}/{}".format("/".join(__file__.split("/")[:-1]), resource_path)
    else:
        mr_presso_file = resource_path

    # Columns: E1 beta/se (0,1), E2 beta/se (3,4), outcome beta/se (6,7).
    with open(mr_presso_file, "r") as f:
        f.readline()  # skip header
        for line in f:
            split = line.split()
            mr_presso_object_ex1.do_and_add_single_term_mr_estimation(
                (float(split[0]), float(split[1])), (float(split[6]), float(split[7]))
            )
            mr_presso_object_ex2.do_and_add_single_term_mr_estimation(
                (float(split[3]), float(split[4])), (float(split[6]), float(split[7]))
            )

    mr_presso_result_ex1 = mr_presso_object_ex1.mr_presso(n_sims=1000, significance_thresh=0.2)
    mr_presso_result_ex2 = mr_presso_object_ex2.mr_presso(n_sims=1000, significance_thresh=0.2)

    # The precision of these results is dependent on the n_sims
    # if you want you can reduce the precision (0.02 below) and subsequently
    # increase the n_sims parameter, but this takes a fair bit of time.
    #
    # BUG FIX: the first two assertions previously compared the *signed*
    # difference (no abs()), so arbitrarily large negative deviations passed.
    assert abs(mr_presso_result_ex1[0] - mr_presso_reference_ex1[0]) < 0.02
    assert abs(mr_presso_result_ex1[1] - mr_presso_reference_ex1[1]) < 0.02
    assert abs(mr_presso_result_ex2[0] - mr_presso_reference_ex2[0]) < 0.02
    assert abs(mr_presso_result_ex2[1] - mr_presso_reference_ex2[1]) < 0.02
tests/functional/__init__.py | netwrkr/ensconce | 1 | 12763647 | <gh_stars>1-10
import os.path
import threading
import socket
import collections
import time
import configobj
import cherrypy
import requests
from cherrypy.process.servers import wait_for_free_port
import selenium.webdriver as webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from ensconce.config import init_app, config
from ensconce.autolog import log
from ensconce import server
from tests import BaseModelTest
__here__ = os.path.abspath(os.path.dirname(__file__))
DriverDetails = collections.namedtuple('DriverDetails', ["capabilities", "remote_host"])
def get_selenium_config():
    """Load the selenium settings, overlaying local overrides when present."""
    cfg = configobj.ConfigObj(
        os.path.join(__here__, 'settings.cfg'), interpolation='template')
    local_path = os.path.join(__here__, 'local.settings.cfg')
    if os.path.exists(local_path):
        cfg.merge(configobj.ConfigObj(local_path, interpolation='template'))
    return cfg
def get_server_information():
    """Return the base URL (scheme://host:port) of the server under test."""
    sel_config = get_selenium_config()
    scheme = sel_config.get('server_scheme', 'http')
    hostname = sel_config.get('server_hostname', socket.getfqdn())
    port = sel_config.get('server_port', config['server.socket_port'])
    return '{0}://{1}:{2}'.format(scheme, hostname, port)
def get_configured_remote_drivers():
    """
    Get the relevant WebDriver information from the associated config files.

    Every [section] in the selenium configuration is interpreted as a
    DesiredCapabilities attribute name plus remote-host connection info.
    """
    sel_config = get_selenium_config()
    drivers = [
        DriverDetails(name, section['remote'])
        for name, section in sel_config.iteritems()
        if isinstance(section, configobj.Section)
    ]
    # Fall back to a local Firefox driver when nothing is configured.
    return drivers or [DriverDetails("Firefox", "unused")]
class FunctionalTestController(BaseModelTest):
    """Base class for functional tests.

    Boots the embedded CherryPy server once per test class (setUpClass) and
    shuts it down afterwards (tearDownClass).
    """

    @classmethod
    def setUpClass(cls):
        """Configure the app server, wait for a free port, and start CherryPy."""
        super(FunctionalTestController, cls).setUpClass()
        #from ensconce import server_autoconfig
        if not server.configured:
            server.configure()
        assert 'db' in config['auth.provider'], "Need to have 'db' in providers list."
        assert not config['server.ssl_certificate'], "SSL isn't supported yet for functional tests."
        # This is done so that we get a nicer error message if the port is still in-use.
        # (CherryPy will just silently sys.exit())
        # Let's try using cherrypy's method directly.
        #if not check_port(config["server.socket_host"], config["server.socket_port"]):
        #    raise IOError("Port is not free: {0}:{1}".format(config["server.socket_host"], config["server.socket_port"]))
        #
        # Interestingly that fails ..
        wait_for_free_port(config["server.socket_host"], config["server.socket_port"], timeout=60) # net.ipv4.tcp_fin_timeout
        # sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # http://www.unixguide.net/network/socketfaq/4.5.shtml
        # sock.bind((config["server.socket_host"], config["server.socket_port"]))
        # sock.close()
        cherrypy.engine.start()
        cherrypy.engine.wait(cherrypy.engine.states.STARTED)

    @classmethod
    def tearDownClass(cls):
        """Stop the CherryPy engine and block until it is fully stopped."""
        cherrypy.engine.stop()
        cherrypy.engine.wait(cherrypy.engine.states.STOPPED)
        #time.sleep(60) # Waiting for net.ipv4.tcp_fin_timeout seconds. This is really hackish.
        super(FunctionalTestController, cls).tearDownClass()

    def setUp(self):
        super(FunctionalTestController, self).setUp()

    def tearDown(self):
        super(FunctionalTestController, self).tearDown()
        #self.server_thread.

    def url(self, *args, **kwargs):
        """Build an absolute URL on the test server from path segments.

        NOTE(review): **kwargs is accepted but ignored — presumably intended
        for query parameters; confirm against callers before relying on it.
        """
        return get_server_information() + '/' + '/'.join(args)
class SeleniumTestController(FunctionalTestController):
    """
    Connect to the remote selenium webdriver to start running tests
    """
    # First configured remote driver; falls back to a local Firefox
    # DriverDetails when the settings files declare none.
    driver = get_configured_remote_drivers()[0]
    required_ws_version = 13

    @classmethod
    def setUpClass(cls):
        """Start the server (parent class), then open the remote WebDriver."""
        super(SeleniumTestController, cls).setUpClass()
        try:
            wd = cls.get_webdriver()
        except:
            # addCleanup is an instance method; cannot be called from classmethods
            cls.tearDownClass()
            raise
        cls.wd = wd
        cls.initial_loads()

    @classmethod
    def get_webdriver(cls):
        """Create a Remote WebDriver from the configured capabilities/host."""
        exe = '{0}/wd/hub'.format(cls.driver.remote_host)
        cap = getattr(DesiredCapabilities, cls.driver.capabilities)
        log.debug("{0!r} {1!r}", exe, cap)
        return webdriver.Remote(command_executor=exe, desired_capabilities=cap)

    @classmethod
    def initial_loads(cls):
        """Load the server's base URL and size the browser window."""
        cls.url_base = get_server_information()
        cls.wd.get(cls.url_base)
        cls.wd.set_window_size(1200, 800)

    @classmethod
    def tearDownClass(cls):
        """Quit the WebDriver (if one was opened), then stop the server."""
        if hasattr(cls, 'wd'):
            log.debug("Quittting the WebDriver.")
            cls.wd.quit()
        super(SeleniumTestController, cls).tearDownClass()

    def fail_login(self, username, password):
        """
        Simple method to assure that login fails with specified creds.
        """
        self.wd.find_element_by_id("username").send_keys(username)
        self.wd.find_element_by_id("password").send_keys(password)
        self.wd.find_element_by_css_selector("button.submit").click() # implicit wait
        self.assert_form_error("Invalid username/password.")

    def login(self, username="op1", password="<PASSWORD>"):
        """Log in via the form and assert the welcome banner shows the user."""
        self.wd.find_element_by_id("username").send_keys(username)
        self.wd.find_element_by_id("password").send_keys(password)
        self.wd.find_element_by_css_selector("button.submit").click() # implicit wait
        self.assertTrue(self.is_element_present(By.ID, "welcome"))
        welcome = self.wd.find_element(By.ID, "welcome")
        self.assertIn(username, welcome.text)

    def logout(self):
        """Log out by navigating to the /logout URL directly."""
        # FIXME: click button
        self.wd.get(self.url_base + "/logout")

    def open_url(self, path):
        """Navigate the browser to *path* under the test server's base URL."""
        self.wd.get(self.url_base + path)

    def submit_form(self, form_id):
        """Click the submit button of the form with DOM id *form_id*."""
        self.wd.find_element_by_css_selector("#{0} button.submit".format(form_id)).click() # implicit wait

    def is_element_present(self, how, what):
        """Return True if an element can be located by (how, what)."""
        try:
            self.wd.find_element(by=how, value=what)
        except NoSuchElementException:
            return False
        return True

    def assert_num_rows(self, num_rows, table=None):
        """Assert the (optionally *table*-th) table body has *num_rows* rows."""
        if table is not None:
            xpath = "//table[{0}]".format(table)
        else:
            xpath = "//table"
        xpath += "/tbody/tr"
        elements = self.wd.find_elements(By.XPATH, xpath)
        self.assertEquals(num_rows, len(elements))

    def _assert_in_data_table(self, name, negate=False, row=1, is_link=False, table=1):
        """Assert *name* is (or, when *negate*, is not) among the cell texts
        of row *row* in the *table*-th table; *is_link* targets anchor text."""
        if table is not None:
            xpath = "//table[{0}]".format(table)
        else:
            xpath = "//table"
        xpath += "/tbody/tr[{0}]/td".format(row)
        if is_link:
            xpath += "/a"
        elements = self.wd.find_elements(By.XPATH, xpath)
        names = [e.text for e in elements]
        if negate:
            self.assertNotIn(name, names)
        else:
            self.assertIn(name, names)

    def assert_in_data_table(self, name, row=1, is_link=False, table=1):
        """Assert *name* appears in the given data-table row."""
        self._assert_in_data_table(name=name, negate=False, row=row, is_link=is_link, table=table)

    def assert_not_in_data_table(self, name, row=1, is_link=False, table=1):
        """Assert *name* does not appear in the given data-table row."""
        self._assert_in_data_table(name=name, negate=True, row=row, is_link=is_link, table=table)

    def _assert_in_list_table(self, name, negate=False, column=1, is_link=True, nobr=False, table=None):
        """Assert *name* is (or is not) among the texts of column *column*
        across all rows; *nobr*/*is_link* refine the cell XPath."""
        if table is not None:
            xpath = "//table[{0}]".format(table)
        else:
            xpath = "//table"
        xpath += "/tbody/tr/td[{0}]".format(column)
        if nobr:
            xpath += '/nobr'
        if is_link:
            xpath += "/a"
        elements = self.wd.find_elements(By.XPATH, xpath)
        names = [e.text for e in elements]
        if negate:
            self.assertNotIn(name, names)
        else:
            self.assertIn(name, names)

    def assert_in_list_table(self, name, column=1, is_link=True, nobr=False, table=None):
        """Assert *name* appears in the given list-table column."""
        self._assert_in_list_table(name=name, negate=False, column=column, is_link=is_link, nobr=nobr, table=table)

    def assert_not_in_list_table(self, name, column=1, is_link=True, nobr=False, table=None):
        """Assert *name* does not appear in the given list-table column."""
        self._assert_in_list_table(name=name, negate=True, column=column, is_link=is_link, nobr=nobr, table=table)

    def assert_form_error(self, message):
        """Assert *message* is among the displayed form-error texts."""
        errors = self.wd.find_elements(By.CLASS_NAME, "form_error")
        error_messages = [e.text for e in errors]
        self.assertIn(message, error_messages)

    def assert_error_page(self):
        """
        Asserts that we got the generic 500 error page.
        """
        self.assertEquals("Unable to complete request", self.wd.title, "Expected 500 error page.")

    def assert_notification(self, msg):
        """
        Assert a specific notification message is present.
        """
        elements = WebDriverWait(self.wd, 5).until(lambda d: d.find_elements(By.XPATH, "//div[@class='notification']/span"))
        notifications = [e.text for e in elements]
        self.assertIn(msg, notifications)
tests/test_session.py | EdwardBetts/pyspotify | 2 | 12763648 | <gh_stars>1-10
# encoding: utf-8
from __future__ import unicode_literals
import unittest
import spotify
from spotify.session import _SessionCallbacks
import tests
from tests import mock
@mock.patch('spotify.session.lib', spec=spotify.lib)
class SessionTest(unittest.TestCase):
def tearDown(self):
spotify._session_instance = None
def test_raises_error_if_a_session_already_exists(self, lib_mock):
tests.create_real_session(lib_mock)
with self.assertRaises(RuntimeError):
tests.create_real_session(lib_mock)
@mock.patch('spotify.Config')
def test_creates_config_if_none_provided(self, config_cls_mock, lib_mock):
lib_mock.sp_session_create.return_value = spotify.ErrorType.OK
session = spotify.Session()
config_cls_mock.assert_called_once_with()
self.assertEqual(session.config, config_cls_mock.return_value)
@mock.patch('spotify.Config')
def test_tries_to_load_application_key_if_none_provided(
self, config_cls_mock, lib_mock):
lib_mock.sp_session_create.return_value = spotify.ErrorType.OK
config_mock = config_cls_mock.return_value
config_mock.application_key = None
spotify.Session()
config_mock.load_application_key_file.assert_called_once_with()
def test_raises_error_if_not_ok(self, lib_mock):
lib_mock.sp_session_create.return_value = (
spotify.ErrorType.BAD_API_VERSION)
config = spotify.Config()
config.application_key = b'\x01' * 321
with self.assertRaises(spotify.Error):
spotify.Session(config=config)
def test_releases_sp_session_when_session_dies(self, lib_mock):
sp_session = spotify.ffi.NULL
def func(sp_session_config, sp_session_ptr):
sp_session_ptr[0] = sp_session
return spotify.ErrorType.OK
lib_mock.sp_session_create.side_effect = func
config = spotify.Config()
config.application_key = b'\x01' * 321
session = spotify.Session(config=config)
session = None # noqa
spotify._session_instance = None
tests.gc_collect()
lib_mock.sp_session_release.assert_called_with(sp_session)
def test_login_raises_error_if_no_password_and_no_blob(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
with self.assertRaises(AttributeError):
session.login('alice')
def test_login_with_password(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.login('alice', 'secret')
lib_mock.sp_session_login.assert_called_once_with(
session._sp_session, mock.ANY, mock.ANY,
False, spotify.ffi.NULL)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][1]),
b'alice')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][2]),
b'secret')
def test_login_with_blob(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.login('alice', blob='secret blob')
lib_mock.sp_session_login.assert_called_once_with(
session._sp_session, mock.ANY, spotify.ffi.NULL,
False, mock.ANY)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][1]),
b'alice')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][4]),
b'secret blob')
def test_login_with_remember_me_flag(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.login('alice', 'secret', remember_me='anything truish')
lib_mock.sp_session_login.assert_called_once_with(
session._sp_session, mock.ANY, mock.ANY,
True, spotify.ffi.NULL)
def test_login_fail_raises_error(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.NO_SUCH_USER
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.login('alice', 'secret')
def test_logout(self, lib_mock):
lib_mock.sp_session_logout.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.logout()
lib_mock.sp_session_logout.assert_called_once_with(session._sp_session)
def test_logout_fail_raises_error(self, lib_mock):
lib_mock.sp_session_login.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.logout()
def test_remembered_user_name_grows_buffer_to_fit_username(self, lib_mock):
username = 'alice' * 100
lib_mock.sp_session_remembered_user.side_effect = (
tests.buffer_writer(username))
session = tests.create_real_session(lib_mock)
result = session.remembered_user_name
lib_mock.sp_session_remembered_user.assert_called_with(
session._sp_session, mock.ANY, mock.ANY)
self.assertEqual(result, username)
def test_remembered_user_name_is_none_if_not_remembered(self, lib_mock):
lib_mock.sp_session_remembered_user.return_value = -1
session = tests.create_real_session(lib_mock)
result = session.remembered_user_name
lib_mock.sp_session_remembered_user.assert_called_with(
session._sp_session, mock.ANY, mock.ANY)
self.assertIsNone(result)
def test_relogin(self, lib_mock):
lib_mock.sp_session_relogin.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.relogin()
lib_mock.sp_session_relogin.assert_called_once_with(
session._sp_session)
def test_relogin_fail_raises_error(self, lib_mock):
lib_mock.sp_session_relogin.return_value = (
spotify.ErrorType.NO_CREDENTIALS)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.relogin()
def test_forget_me(self, lib_mock):
lib_mock.sp_session_forget_me.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.forget_me()
lib_mock.sp_session_forget_me.assert_called_with(session._sp_session)
def test_forget_me_fail_raises_error(self, lib_mock):
lib_mock.sp_session_forget_me.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.forget_me()
@mock.patch('spotify.user.lib', spec=spotify.lib)
def test_user(self, user_lib_mock, lib_mock):
lib_mock.sp_session_user.return_value = (
spotify.ffi.cast('sp_user *', 42))
session = tests.create_real_session(lib_mock)
result = session.user
lib_mock.sp_session_user.assert_called_with(session._sp_session)
self.assertIsInstance(result, spotify.User)
def test_user_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_user.return_value = spotify.ffi.NULL
session = tests.create_real_session(lib_mock)
result = session.user
lib_mock.sp_session_user.assert_called_with(session._sp_session)
self.assertIsNone(result)
def test_user_name(self, lib_mock):
lib_mock.sp_session_user_name.return_value = spotify.ffi.new(
'char[]', b'alice')
session = tests.create_real_session(lib_mock)
result = session.user_name
lib_mock.sp_session_user_name.assert_called_with(session._sp_session)
self.assertEqual(result, 'alice')
def test_user_country(self, lib_mock):
lib_mock.sp_session_user_country.return_value = (
ord('S') << 8 | ord('E'))
session = tests.create_real_session(lib_mock)
result = session.user_country
lib_mock.sp_session_user_country.assert_called_with(
session._sp_session)
self.assertEqual(result, 'SE')
@mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
def test_playlist_container(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_playlistcontainer.return_value = (
spotify.ffi.cast('sp_playlistcontainer *', 42))
session = tests.create_real_session(lib_mock)
result = session.playlist_container
lib_mock.sp_session_playlistcontainer.assert_called_with(
session._sp_session)
self.assertIsInstance(result, spotify.PlaylistContainer)
@mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
def test_playlist_container_if_already_listened_to(
self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_playlistcontainer.return_value = (
spotify.ffi.cast('sp_playlistcontainer *', 42))
session = tests.create_real_session(lib_mock)
result1 = session.playlist_container
result1.on(
spotify.PlaylistContainerEvent.PLAYLIST_ADDED, lambda *args: None)
result2 = session.playlist_container
result1.off()
self.assertIsInstance(result1, spotify.PlaylistContainer)
self.assertIs(result1, result2)
def test_playlist_container_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_playlistcontainer.return_value = spotify.ffi.NULL
session = tests.create_real_session(lib_mock)
result = session.playlist_container
lib_mock.sp_session_playlistcontainer.assert_called_with(
session._sp_session)
self.assertIsNone(result)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
def test_inbox(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_inbox_create.return_value = (
spotify.ffi.cast('sp_playlist *', 42))
session = tests.create_real_session(lib_mock)
result = session.inbox
lib_mock.sp_session_inbox_create.assert_called_with(
session._sp_session)
self.assertIsInstance(result, spotify.Playlist)
# Since we *created* the sp_playlist, we already have a refcount of 1
# and shouldn't increase the refcount when wrapping this sp_playlist in
# a Playlist object
self.assertEqual(playlist_lib_mock.sp_playlist_add_ref.call_count, 0)
def test_inbox_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_inbox_create.return_value = spotify.ffi.NULL
session = tests.create_real_session(lib_mock)
result = session.inbox
lib_mock.sp_session_inbox_create.assert_called_with(
session._sp_session)
self.assertIsNone(result)
def test_set_cache_size(self, lib_mock):
lib_mock.sp_session_set_cache_size.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.set_cache_size(100)
lib_mock.sp_session_set_cache_size.assert_called_once_with(
session._sp_session, 100)
def test_set_cache_size_fail_raises_error(self, lib_mock):
lib_mock.sp_session_set_cache_size.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.set_cache_size(100)
def test_flush_caches(self, lib_mock):
lib_mock.sp_session_flush_caches.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.flush_caches()
lib_mock.sp_session_flush_caches.assert_called_once_with(
session._sp_session)
def test_flush_caches_fail_raises_error(self, lib_mock):
lib_mock.sp_session_flush_caches.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.flush_caches()
def test_preferred_bitrate(self, lib_mock):
lib_mock.sp_session_preferred_bitrate.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.preferred_bitrate(spotify.Bitrate.BITRATE_320k)
lib_mock.sp_session_preferred_bitrate.assert_called_with(
session._sp_session, spotify.Bitrate.BITRATE_320k)
def test_preferred_bitrate_fail_raises_error(self, lib_mock):
lib_mock.sp_session_preferred_bitrate.return_value = (
spotify.ErrorType.INVALID_ARGUMENT)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.preferred_bitrate(17)
def test_preferred_offline_bitrate(self, lib_mock):
lib_mock.sp_session_preferred_offline_bitrate.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.preferred_offline_bitrate(spotify.Bitrate.BITRATE_320k)
lib_mock.sp_session_preferred_offline_bitrate.assert_called_with(
session._sp_session, spotify.Bitrate.BITRATE_320k, 0)
def test_preferred_offline_bitrate_with_allow_resync(self, lib_mock):
lib_mock.sp_session_preferred_offline_bitrate.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.preferred_offline_bitrate(
spotify.Bitrate.BITRATE_320k, allow_resync=True)
lib_mock.sp_session_preferred_offline_bitrate.assert_called_with(
session._sp_session, spotify.Bitrate.BITRATE_320k, 1)
def test_preferred_offline_bitrate_fail_raises_error(self, lib_mock):
lib_mock.sp_session_preferred_offline_bitrate.return_value = (
spotify.ErrorType.INVALID_ARGUMENT)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.preferred_offline_bitrate(17)
def test_get_volume_normalization(self, lib_mock):
lib_mock.sp_session_get_volume_normalization.return_value = 0
session = tests.create_real_session(lib_mock)
result = session.volume_normalization
lib_mock.sp_session_get_volume_normalization.assert_called_with(
session._sp_session)
self.assertFalse(result)
def test_set_volume_normalization(self, lib_mock):
lib_mock.sp_session_set_volume_normalization.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.volume_normalization = True
lib_mock.sp_session_set_volume_normalization.assert_called_with(
session._sp_session, 1)
def test_set_volume_normalization_fail_raises_error(self, lib_mock):
lib_mock.sp_session_set_volume_normalization.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.volume_normalization = True
def test_process_events_returns_ms_to_next_timeout(self, lib_mock):
def func(sp_session, int_ptr):
int_ptr[0] = 5500
return spotify.ErrorType.OK
lib_mock.sp_session_process_events.side_effect = func
session = tests.create_real_session(lib_mock)
timeout = session.process_events()
self.assertEqual(timeout, 5500)
def test_process_events_fail_raises_error(self, lib_mock):
lib_mock.sp_session_process_events.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.process_events()
@mock.patch('spotify.InboxPostResult', spec=spotify.InboxPostResult)
def test_inbox_post_tracks(self, inbox_mock, lib_mock):
session = tests.create_real_session(lib_mock)
inbox_instance_mock = inbox_mock.return_value
result = session.inbox_post_tracks(
mock.sentinel.username, mock.sentinel.tracks,
mock.sentinel.message, mock.sentinel.callback)
inbox_mock.assert_called_with(
session, mock.sentinel.username, mock.sentinel.tracks,
mock.sentinel.message, mock.sentinel.callback)
self.assertEqual(result, inbox_instance_mock)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
def test_get_starred(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_starred_for_user_create.return_value = (
spotify.ffi.cast('sp_playlist *', 42))
session = tests.create_real_session(lib_mock)
result = session.get_starred('alice')
lib_mock.sp_session_starred_for_user_create.assert_called_with(
session._sp_session, b'alice')
self.assertIsInstance(result, spotify.Playlist)
# Since we *created* the sp_playlist, we already have a refcount of 1
# and shouldn't increase the refcount when wrapping this sp_playlist in
# a Playlist object
self.assertEqual(playlist_lib_mock.sp_playlist_add_ref.call_count, 0)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
def test_get_starred_for_current_user(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_starred_create.return_value = (
spotify.ffi.cast('sp_playlist *', 42))
session = tests.create_real_session(lib_mock)
result = session.get_starred()
lib_mock.sp_session_starred_create.assert_called_with(
session._sp_session)
self.assertIsInstance(result, spotify.Playlist)
# Since we *created* the sp_playlist, we already have a refcount of 1
# and shouldn't increase the refcount when wrapping this sp_playlist in
# a Playlist object
self.assertEqual(playlist_lib_mock.sp_playlist_add_ref.call_count, 0)
def test_get_starred_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_starred_for_user_create.return_value = (
spotify.ffi.NULL)
session = tests.create_real_session(lib_mock)
result = session.get_starred('alice')
lib_mock.sp_session_starred_for_user_create.assert_called_with(
session._sp_session, b'alice')
self.assertIsNone(result)
    @mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
    def test_get_published_playlists(self, playlist_lib_mock, lib_mock):
        """Published container for a named user is wrapped without add_ref."""
        func_mock = lib_mock.sp_session_publishedcontainer_for_user_create
        func_mock.return_value = spotify.ffi.cast('sp_playlistcontainer *', 42)
        session = tests.create_real_session(lib_mock)
        result = session.get_published_playlists('alice')
        func_mock.assert_called_with(session._sp_session, b'alice')
        self.assertIsInstance(result, spotify.PlaylistContainer)
        # Since we *created* the sp_playlistcontainer, we already have a
        # refcount of 1 and shouldn't increase the refcount when wrapping this
        # sp_playlistcontainer in a PlaylistContainer object
        self.assertEqual(
            playlist_lib_mock.sp_playlistcontainer_add_ref.call_count, 0)
    @mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
    def test_get_published_playlists_for_current_user(
            self, playlist_lib_mock, lib_mock):
        """Omitting the username passes ffi.NULL to the C function."""
        func_mock = lib_mock.sp_session_publishedcontainer_for_user_create
        func_mock.return_value = spotify.ffi.cast('sp_playlistcontainer *', 42)
        session = tests.create_real_session(lib_mock)
        result = session.get_published_playlists()
        func_mock.assert_called_with(session._sp_session, spotify.ffi.NULL)
        self.assertIsInstance(result, spotify.PlaylistContainer)
    def test_get_published_playlists_if_not_logged_in(self, lib_mock):
        """A NULL sp_playlistcontainer maps to a ``None`` result."""
        func_mock = lib_mock.sp_session_publishedcontainer_for_user_create
        func_mock.return_value = spotify.ffi.NULL
        session = tests.create_real_session(lib_mock)
        result = session.get_published_playlists('alice')
        func_mock.assert_called_with(session._sp_session, b'alice')
        self.assertIsNone(result)
@mock.patch('spotify.Link')
def test_get_link(self, link_mock, lib_mock):
session = tests.create_real_session(lib_mock)
link_mock.return_value = mock.sentinel.link
result = session.get_link('spotify:any:foo')
self.assertIs(result, mock.sentinel.link)
link_mock.assert_called_with(session, uri='spotify:any:foo')
@mock.patch('spotify.Track')
def test_get_track(self, track_mock, lib_mock):
session = tests.create_real_session(lib_mock)
track_mock.return_value = mock.sentinel.track
result = session.get_track('spotify:track:foo')
self.assertIs(result, mock.sentinel.track)
track_mock.assert_called_with(session, uri='spotify:track:foo')
    @mock.patch('spotify.Track')
    def test_get_local_track(self, track_mock, lib_mock):
        """Local-track fields are encoded to C strings and passed positionally."""
        session = tests.create_real_session(lib_mock)
        sp_track = spotify.ffi.cast('sp_track *', 42)
        lib_mock.sp_localtrack_create.return_value = sp_track
        track_mock.return_value = mock.sentinel.track
        track = session.get_local_track(
            artist='foo', title='bar', album='baz', length=210000)
        self.assertEqual(track, mock.sentinel.track)
        # The three string args are char* so they are matched with mock.ANY
        # above and then decoded via ffi.string below.
        lib_mock.sp_localtrack_create.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY, 210000)
        self.assertEqual(
            spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][0]),
            b'foo')
        self.assertEqual(
            spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][1]),
            b'bar')
        self.assertEqual(
            spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][2]),
            b'baz')
        self.assertEqual(
            lib_mock.sp_localtrack_create.call_args[0][3], 210000)
        # Since we *created* the sp_track, we already have a refcount of 1 and
        # shouldn't increase the refcount when wrapping this sp_track in a
        # Track object
        track_mock.assert_called_with(
            session, sp_track=sp_track, add_ref=False)
    @mock.patch('spotify.Track')
    def test_get_local_track_with_defaults(self, track_mock, lib_mock):
        """Omitted local-track fields default to empty strings and length -1."""
        session = tests.create_real_session(lib_mock)
        sp_track = spotify.ffi.cast('sp_track *', 42)
        lib_mock.sp_localtrack_create.return_value = sp_track
        track_mock.return_value = mock.sentinel.track
        track = session.get_local_track()
        self.assertEqual(track, mock.sentinel.track)
        lib_mock.sp_localtrack_create.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY, -1)
        self.assertEqual(
            spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][0]),
            b'')
        self.assertEqual(
            spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][1]),
            b'')
        self.assertEqual(
            spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][2]),
            b'')
        self.assertEqual(
            lib_mock.sp_localtrack_create.call_args[0][3], -1)
        # Since we *created* the sp_track, we already have a refcount of 1 and
        # shouldn't increase the refcount when wrapping this sp_track in a
        # Track object
        track_mock.assert_called_with(
            session, sp_track=sp_track, add_ref=False)
@mock.patch('spotify.Album')
def test_get_album(self, album_mock, lib_mock):
session = tests.create_real_session(lib_mock)
album_mock.return_value = mock.sentinel.album
result = session.get_album('spotify:album:foo')
self.assertIs(result, mock.sentinel.album)
album_mock.assert_called_with(session, uri='spotify:album:foo')
@mock.patch('spotify.Artist')
def test_get_artist(self, artist_mock, lib_mock):
session = tests.create_real_session(lib_mock)
artist_mock.return_value = mock.sentinel.artist
result = session.get_artist('spotify:artist:foo')
self.assertIs(result, mock.sentinel.artist)
artist_mock.assert_called_with(session, uri='spotify:artist:foo')
@mock.patch('spotify.Playlist')
def test_get_playlist(self, playlist_mock, lib_mock):
session = tests.create_real_session(lib_mock)
playlist_mock.return_value = mock.sentinel.playlist
result = session.get_playlist('spotify:playlist:foo')
self.assertIs(result, mock.sentinel.playlist)
playlist_mock.assert_called_with(session, uri='spotify:playlist:foo')
@mock.patch('spotify.User')
def test_get_user(self, user_mock, lib_mock):
session = tests.create_real_session(lib_mock)
user_mock.return_value = mock.sentinel.user
result = session.get_user('spotify:user:foo')
self.assertIs(result, mock.sentinel.user)
user_mock.assert_called_with(session, uri='spotify:user:foo')
@mock.patch('spotify.Image')
def test_get_image(self, image_mock, lib_mock):
session = tests.create_real_session(lib_mock)
callback = mock.Mock()
image_mock.return_value = mock.sentinel.image
result = session.get_image('spotify:image:foo', callback=callback)
self.assertIs(result, mock.sentinel.image)
image_mock.assert_called_with(
session, uri='spotify:image:foo', callback=callback)
    @mock.patch('spotify.Search')
    def test_search(self, search_mock, lib_mock):
        """search() fills in default offsets/counts for all four result types."""
        session = tests.create_real_session(lib_mock)
        search_mock.return_value = mock.sentinel.search
        result = session.search('alice')
        self.assertIs(result, mock.sentinel.search)
        # All pagination defaults (offset 0, count 20) are passed explicitly
        # to spotify.Search, along with a None callback and search_type.
        search_mock.assert_called_with(
            session, query='alice', callback=None,
            track_offset=0, track_count=20,
            album_offset=0, album_count=20,
            artist_offset=0, artist_count=20,
            playlist_offset=0, playlist_count=20,
            search_type=None)
    @mock.patch('spotify.Toplist')
    def test_toplist(self, toplist_mock, lib_mock):
        """get_toplist() forwards args and default kwargs to spotify.Toplist."""
        session = tests.create_real_session(lib_mock)
        toplist_mock.return_value = mock.sentinel.toplist
        result = session.get_toplist(
            type=spotify.ToplistType.TRACKS, region='NO')
        self.assertIs(result, mock.sentinel.toplist)
        toplist_mock.assert_called_with(
            session, type=spotify.ToplistType.TRACKS, region='NO',
            canonical_username=None, callback=None)
@mock.patch('spotify.session.lib', spec=spotify.lib)
class SessionCallbacksTest(unittest.TestCase):
    """Verify that each low-level libspotify C callback in ``_SessionCallbacks``
    re-emits the matching :class:`spotify.SessionEvent` on the session object,
    converting C-level arguments (error ints, char*, audio buffers) to their
    Python equivalents along the way.
    """

    def tearDown(self):
        # Reset the module-level singleton so each test builds a fresh session.
        spotify._session_instance = None
    def test_logged_in_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.LOGGED_IN, callback)
        # The C callback receives the error as an int; the listener must get
        # it back as an ErrorType enum member.
        _SessionCallbacks.logged_in(
            session._sp_session, int(spotify.ErrorType.BAD_API_VERSION))
        callback.assert_called_once_with(
            session, spotify.ErrorType.BAD_API_VERSION)
    def test_logged_out_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.LOGGED_OUT, callback)
        _SessionCallbacks.logged_out(session._sp_session)
        callback.assert_called_once_with(session)
    def test_metadata_updated_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.METADATA_UPDATED, callback)
        _SessionCallbacks.metadata_updated(session._sp_session)
        callback.assert_called_once_with(session)
    def test_connection_error_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.CONNECTION_ERROR, callback)
        _SessionCallbacks.connection_error(
            session._sp_session, int(spotify.ErrorType.OK))
        callback.assert_called_once_with(session, spotify.ErrorType.OK)
    def test_message_to_user_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.MESSAGE_TO_USER, callback)
        # The char* message is decoded and the trailing newline stripped.
        data = spotify.ffi.new('char[]', b'a log message\n')
        _SessionCallbacks.message_to_user(session._sp_session, data)
        callback.assert_called_once_with(session, 'a log message')
    def test_notify_main_thread_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.NOTIFY_MAIN_THREAD, callback)
        _SessionCallbacks.notify_main_thread(session._sp_session)
        callback.assert_called_once_with(session)
    def test_music_delivery_callback(self, lib_mock):
        """The raw audio buffer is handed to the listener as bytes, wrapped in
        an AudioFormat, and the listener's frame count is returned to C."""
        sp_audioformat = spotify.ffi.new('sp_audioformat *')
        sp_audioformat.channels = 2
        audio_format = spotify.AudioFormat(sp_audioformat)
        num_frames = 10
        frames_size = audio_format.frame_size() * num_frames
        frames = spotify.ffi.new('char[]', frames_size)
        frames[0:3] = [b'a', b'b', b'c']
        frames_void_ptr = spotify.ffi.cast('void *', frames)
        callback = mock.Mock()
        callback.return_value = num_frames
        session = tests.create_real_session(lib_mock)
        session.on('music_delivery', callback)
        result = _SessionCallbacks.music_delivery(
            session._sp_session, sp_audioformat, frames_void_ptr, num_frames)
        callback.assert_called_once_with(
            session, mock.ANY, mock.ANY, num_frames)
        self.assertEqual(
            callback.call_args[0][1]._sp_audioformat, sp_audioformat)
        # First three bytes were set above; the rest of the buffer is zeroed.
        self.assertEqual(callback.call_args[0][2][:5], b'abc\x00\x00')
        self.assertEqual(result, num_frames)
    def test_music_delivery_without_callback_does_not_consume(self, lib_mock):
        """With no listener registered, zero frames are reported as consumed."""
        session = tests.create_real_session(lib_mock)
        sp_audioformat = spotify.ffi.new('sp_audioformat *')
        num_frames = 10
        frames = spotify.ffi.new('char[]', 0)
        frames_void_ptr = spotify.ffi.cast('void *', frames)
        result = _SessionCallbacks.music_delivery(
            session._sp_session, sp_audioformat, frames_void_ptr, num_frames)
        self.assertEqual(result, 0)
    def test_play_token_lost_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.PLAY_TOKEN_LOST, callback)
        _SessionCallbacks.play_token_lost(session._sp_session)
        callback.assert_called_once_with(session)
    def test_log_message_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.LOG_MESSAGE, callback)
        data = spotify.ffi.new('char[]', b'a log message\n')
        _SessionCallbacks.log_message(session._sp_session, data)
        callback.assert_called_once_with(session, 'a log message')
    def test_end_of_track_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.END_OF_TRACK, callback)
        _SessionCallbacks.end_of_track(session._sp_session)
        callback.assert_called_once_with(session)
    def test_streaming_error_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.STREAMING_ERROR, callback)
        _SessionCallbacks.streaming_error(
            session._sp_session, int(spotify.ErrorType.NO_STREAM_AVAILABLE))
        callback.assert_called_once_with(
            session, spotify.ErrorType.NO_STREAM_AVAILABLE)
    def test_user_info_updated_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.USER_INFO_UPDATED, callback)
        _SessionCallbacks.user_info_updated(session._sp_session)
        callback.assert_called_once_with(session)
    def test_start_playback_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.START_PLAYBACK, callback)
        _SessionCallbacks.start_playback(session._sp_session)
        callback.assert_called_once_with(session)
    def test_stop_playback_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.STOP_PLAYBACK, callback)
        _SessionCallbacks.stop_playback(session._sp_session)
        callback.assert_called_once_with(session)
    def test_get_audio_buffer_stats_callback(self, lib_mock):
        """The listener's AudioBufferStats is written back into the C struct."""
        callback = mock.Mock()
        callback.return_value = spotify.AudioBufferStats(100, 5)
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.GET_AUDIO_BUFFER_STATS, callback)
        sp_audio_buffer_stats = spotify.ffi.new('sp_audio_buffer_stats *')
        _SessionCallbacks.get_audio_buffer_stats(
            session._sp_session, sp_audio_buffer_stats)
        callback.assert_called_once_with(session)
        self.assertEqual(sp_audio_buffer_stats.samples, 100)
        self.assertEqual(sp_audio_buffer_stats.stutter, 5)
    def test_offline_status_updated_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.OFFLINE_STATUS_UPDATED, callback)
        _SessionCallbacks.offline_status_updated(session._sp_session)
        callback.assert_called_once_with(session)
    def test_credentials_blob_updated_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.CREDENTIALS_BLOB_UPDATED, callback)
        # The blob stays as bytes -- it is opaque binary data, not text.
        data = spotify.ffi.new('char[]', b'a credentials blob')
        _SessionCallbacks.credentials_blob_updated(
            session._sp_session, data)
        callback.assert_called_once_with(session, b'a credentials blob')
    def test_connection_state_updated_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.CONNECTION_STATE_UPDATED, callback)
        _SessionCallbacks.connection_state_updated(session._sp_session)
        callback.assert_called_once_with(session)
    def test_scrobble_error_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.SCROBBLE_ERROR, callback)
        _SessionCallbacks.scrobble_error(
            session._sp_session, int(spotify.ErrorType.LASTFM_AUTH_ERROR))
        callback.assert_called_once_with(
            session, spotify.ErrorType.LASTFM_AUTH_ERROR)
    def test_private_session_mode_changed_callback(self, lib_mock):
        callback = mock.Mock()
        session = tests.create_real_session(lib_mock)
        session.on(spotify.SessionEvent.PRIVATE_SESSION_MODE_CHANGED, callback)
        # The C int flag (1) is converted to a Python bool (True).
        _SessionCallbacks.private_session_mode_changed(
            session._sp_session, 1)
        callback.assert_called_once_with(session, True)
| 2.5 | 2 |
eval_extra_tester_v4.py | dgudenius/football_win_predictions | 4 | 12763649 | from util2 import *
from Pythag_Win import *
from Combined4 import *
# Read historical games from CSV
games = Util2.read_games("data/nfl_games.csv")
# Forecast every game
Combined4.combined4(games)
# Evaluate our forecasts against Elo
Util2.evaluate_forecasts(games)
| 1.9375 | 2 |
state_scrapper/state_scrapper/pipelines.py | nikmend/state-scrapper | 0 | 12763650 | <filename>state_scrapper/state_scrapper/pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import NotConfigured
import mysql.connector
import json
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
class DuplicatesPipeline:
    """Drop items whose ``id_web`` has already been seen during this crawl."""

    def __init__(self):
        # id_web values seen so far; set gives O(1) membership tests.
        self.ids_seen = set()

    def process_item(self, item, spider):
        """Return *item* if its ``id_web`` is new; raise DropItem otherwise.

        Bug fix: the original had an unreachable ``print`` immediately after
        ``raise DropItem`` -- it has been removed.
        Raises KeyError if the item has no ``id_web`` field.
        """
        adapter = ItemAdapter(item)
        if adapter['id_web'] in self.ids_seen:
            raise DropItem("Duplicate item found: %r" % item)
        self.ids_seen.add(adapter['id_web'])
        return item

    def close_spider(self, spider):
        # Final count of unique items, reported when the spider finishes.
        print("--------Unique total items ", len(self.ids_seen))
class StateScrapperPipeline(object):
    """Pipeline that keeps ``items.jl`` open for the spider's lifetime.

    NOTE(review): nothing is ever written to the file (the write call was
    already commented out upstream), so the only effect is creating or
    truncating ``items.jl`` on each crawl -- confirm whether JSON-lines
    export is still wanted or this pipeline can be dropped.
    """

    def open_spider(self, spider):
        # Opening in 'w' mode truncates any previous items.jl.
        self.file = open('items.jl', 'w')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # Pass-through: forward the item unchanged to the next pipeline.
        return item
class DatabasePipeline(object):
    """Buffer scraped items and bulk-insert them into MySQL on spider close.

    Items are accumulated in ``self.items`` and flushed with one
    ``executemany`` in :meth:`close_spider`, so the crawl makes a single
    round-trip instead of one INSERT per item.
    """

    def __init__(self, database, user, password, host):
        self.database = database
        self.user = user
        self.password = password
        self.host = host
        self.items = []  # buffered rows: one dict_values per processed item
        self.query = ""  # INSERT statement built from the last item's keys

    def connectDB(self):
        """Open the MySQL connection and create a cursor."""
        self.conn = mysql.connector.connect(
            database=self.database,
            host=self.host,
            user=self.user, password=self.password,
            charset='utf8', use_unicode=True)
        self.cursor = self.conn.cursor()

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's DB_SETTINGS dict.

        Raises NotConfigured when DB_SETTINGS is missing, which tells
        Scrapy to disable this pipeline.
        """
        db_settings = crawler.settings.getdict("DB_SETTINGS")
        if not db_settings:
            raise NotConfigured
        database = db_settings['database']
        user = db_settings['user']
        password = db_settings['password']
        host = db_settings['host']
        return cls(database, user, password, host)

    def open_spider(self, spider):
        self.connectDB()

    def process_item(self, item, spider):
        """Buffer *item* and (re)build the INSERT statement from its keys.

        NOTE(review): assumes every item shares the same keys -- the
        statement built from the last item is applied to all buffered rows.
        Bug fix: the original class defined ``process_item`` twice; the
        first (pass-through) definition was dead code and has been removed.
        """
        placeholders = ', '.join(['%s'] * len(item))
        columns = ', '.join(item.keys())
        # Table/column names cannot be parameterized by the driver; only the
        # values use %s placeholders.
        self.query = "INSERT INTO %s ( %s ) VALUES ( %s )" % (
            "states", columns, placeholders)
        self.items.extend([item.values()])
        return item

    def close_spider(self, spider):
        """Flush buffered items with one executemany, then close the connection."""
        try:
            self.cursor.executemany(self.query, self.items)
            self.conn.commit()
            print("MYSQL " + str(self.cursor.rowcount) + " record inserted.")
            self.items = []
        except Exception as e:
            if 'MySQL server has gone away' in str(e):
                # Reconnect and retry once.  Bug fixes vs. the original:
                # it retried on ``spider.cursor`` (spiders have no cursor,
                # so the retry always raised AttributeError) and it never
                # committed the retried insert.
                self.connectDB()
                self.cursor.executemany(self.query, self.items)
                self.conn.commit()
                self.items = []
            else:
                # Bare ``raise`` preserves the original traceback.
                raise
        self.conn.close()
| 2.4375 | 2 |